Commit 6cb7bfeb authored by David Gibson, committed by Paul Mackerras

[PATCH] powerpc: Merge thread_info.h

Merge ppc32 and ppc64 versions of thread_info.h.  They were pretty
similar already, the chief changes are:

	- Instead of inline asm to implement current_thread_info(),
which needs to be different for ppc32 and ppc64, we use C with an
asm("r1") register variable.  gcc turns it into the same asm as we
used to have for both platforms.
	- We replace ppc32's 'local_flags' with the ppc64
'syscall_noerror' field.  The noerror flag was in fact the only thing
in the local_flags field anyway, so the ppc64 approach is simpler, and
means we only need a load-immediate/store instead of load/mask/store
when clearing the flag.
	- In readiness for 64k pages, when THREAD_SIZE will be less
than a page, ppc64 used kmalloc() rather than get_free_pages() to
allocate the kernel stack.  With this patch we do the same for ppc32,
since there's no strong reason not to.
	- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE
via asm-offsets, thread_info.h can now be safely included in asm, as
on ppc32.

Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and
Power5 (ARCH=ppc64 and ARCH=powerpc).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent b0faa284
...@@ -56,8 +56,6 @@ int main(void) ...@@ -56,8 +56,6 @@ int main(void)
DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm)); DEFINE(MM, offsetof(struct task_struct, mm));
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
DEFINE(THREAD_SHIFT, THREAD_SHIFT);
DEFINE(THREAD_SIZE, THREAD_SIZE);
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
#else #else
DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
...@@ -94,12 +92,10 @@ int main(void) ...@@ -94,12 +92,10 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
#ifdef CONFIG_PPC64
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
#else #ifdef CONFIG_PPC32
DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
......
...@@ -199,10 +199,9 @@ _GLOBAL(DoSyscall) ...@@ -199,10 +199,9 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS #ifdef SHOW_SYSCALLS
bl do_show_syscall bl do_show_syscall
#endif /* SHOW_SYSCALLS */ #endif /* SHOW_SYSCALLS */
rlwinm r10,r1,0,0,18 /* current_thread_info() */ rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r11,TI_LOCAL_FLAGS(r10) li r11,0
rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR stb r11,TI_SC_NOERR(r10)
stw r11,TI_LOCAL_FLAGS(r10)
lwz r11,TI_FLAGS(r10) lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace bne- syscall_dotrace
...@@ -225,10 +224,10 @@ ret_from_syscall: ...@@ -225,10 +224,10 @@ ret_from_syscall:
mr r6,r3 mr r6,r3
li r11,-_LAST_ERRNO li r11,-_LAST_ERRNO
cmplw 0,r3,r11 cmplw 0,r3,r11
rlwinm r12,r1,0,0,18 /* current_thread_info() */ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
blt+ 30f blt+ 30f
lwz r11,TI_LOCAL_FLAGS(r12) lbz r11,TI_SC_NOERR(r12)
andi. r11,r11,_TIFL_FORCE_NOERROR cmpwi r11,0
bne 30f bne 30f
neg r3,r3 neg r3,r3
lwz r10,_CCR(r1) /* Set SO bit in CR */ lwz r10,_CCR(r1) /* Set SO bit in CR */
...@@ -315,7 +314,7 @@ syscall_exit_work: ...@@ -315,7 +314,7 @@ syscall_exit_work:
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC SYNC
MTMSRD(r10) /* disable interrupts again */ MTMSRD(r10) /* disable interrupts again */
rlwinm r12,r1,0,0,18 /* current_thread_info() */ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r9,TI_FLAGS(r12) lwz r9,TI_FLAGS(r12)
5: 5:
andi. r0,r9,_TIF_NEED_RESCHED andi. r0,r9,_TIF_NEED_RESCHED
...@@ -630,7 +629,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601) ...@@ -630,7 +629,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
.globl sigreturn_exit .globl sigreturn_exit
sigreturn_exit: sigreturn_exit:
subi r1,r3,STACK_FRAME_OVERHEAD subi r1,r3,STACK_FRAME_OVERHEAD
rlwinm r12,r1,0,0,18 /* current_thread_info() */ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r9,TI_FLAGS(r12) lwz r9,TI_FLAGS(r12)
andi. r0,r9,_TIF_SYSCALL_T_OR_A andi. r0,r9,_TIF_SYSCALL_T_OR_A
beq+ ret_from_except_full beq+ ret_from_except_full
...@@ -657,7 +656,7 @@ ret_from_except: ...@@ -657,7 +656,7 @@ ret_from_except:
user_exc_return: /* r10 contains MSR_KERNEL here */ user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */ /* Check current_thread_info()->flags */
rlwinm r9,r1,0,0,18 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r9,TI_FLAGS(r9) lwz r9,TI_FLAGS(r9)
andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED) andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne do_work bne do_work
...@@ -677,7 +676,7 @@ restore_user: ...@@ -677,7 +676,7 @@ restore_user:
/* N.B. the only way to get here is from the beq following ret_from_except. */ /* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel: resume_kernel:
/* check current_thread_info->preempt_count */ /* check current_thread_info->preempt_count */
rlwinm r9,r1,0,0,18 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r0,TI_PREEMPT(r9) lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore bne restore
...@@ -687,7 +686,7 @@ resume_kernel: ...@@ -687,7 +686,7 @@ resume_kernel:
andi. r0,r3,MSR_EE /* interrupts off? */ andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */ beq restore /* don't schedule if so */
1: bl preempt_schedule_irq 1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,18 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r3,TI_FLAGS(r9) lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b bne- 1b
...@@ -889,7 +888,7 @@ recheck: ...@@ -889,7 +888,7 @@ recheck:
LOAD_MSR_KERNEL(r10,MSR_KERNEL) LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC SYNC
MTMSRD(r10) /* disable interrupts */ MTMSRD(r10) /* disable interrupts */
rlwinm r9,r1,0,0,18 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r9,TI_FLAGS(r9) lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched bne- do_resched
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/hvcall.h> #include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h> #include <asm/iSeries/LparMap.h>
#include <asm/thread_info.h>
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE #define DO_SOFT_DISABLE
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/thread_info.h>
.text .text
......
...@@ -130,10 +130,10 @@ main(void) ...@@ -130,10 +130,10 @@ main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
......
...@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall) ...@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall)
bl do_show_syscall bl do_show_syscall
#endif /* SHOW_SYSCALLS */ #endif /* SHOW_SYSCALLS */
rlwinm r10,r1,0,0,18 /* current_thread_info() */ rlwinm r10,r1,0,0,18 /* current_thread_info() */
lwz r11,TI_LOCAL_FLAGS(r10) li r11,0
rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR stb r11,TI_SC_NOERR(r10)
stw r11,TI_LOCAL_FLAGS(r10)
lwz r11,TI_FLAGS(r10) lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace bne- syscall_dotrace
...@@ -227,8 +226,8 @@ ret_from_syscall: ...@@ -227,8 +226,8 @@ ret_from_syscall:
cmplw 0,r3,r11 cmplw 0,r3,r11
rlwinm r12,r1,0,0,18 /* current_thread_info() */ rlwinm r12,r1,0,0,18 /* current_thread_info() */
blt+ 30f blt+ 30f
lwz r11,TI_LOCAL_FLAGS(r12) lbz r11,TI_SC_NOERR(r12)
andi. r11,r11,_TIFL_FORCE_NOERROR cmpwi r11,0
bne 30f bne 30f
neg r3,r3 neg r3,r3
lwz r10,_CCR(r1) /* Set SO bit in CR */ lwz r10,_CCR(r1) /* Set SO bit in CR */
......
...@@ -46,8 +46,6 @@ ...@@ -46,8 +46,6 @@
int main(void) int main(void)
{ {
/* thread struct on stack */ /* thread struct on stack */
DEFINE(THREAD_SHIFT, THREAD_SHIFT);
DEFINE(THREAD_SIZE, THREAD_SIZE);
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/hvcall.h> #include <asm/hvcall.h>
#include <asm/iSeries/LparMap.h> #include <asm/iSeries/LparMap.h>
#include <asm/thread_info.h>
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE #define DO_SOFT_DISABLE
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/thread_info.h>
.text .text
......
/* thread_info.h: PPC low-level thread information /* thread_info.h: PowerPC low-level thread information
* adapted from the i386 version by Paul Mackerras * adapted from the i386 version by Paul Mackerras
* *
* Copyright (C) 2002 David Howells (dhowells@redhat.com) * Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds and Dave Miller * - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/ */
#ifndef _ASM_THREAD_INFO_H #ifndef _ASM_POWERPC_THREAD_INFO_H
#define _ASM_THREAD_INFO_H #define _ASM_POWERPC_THREAD_INFO_H
#ifdef __KERNEL__ #ifdef __KERNEL__
/* We have 8k stacks on ppc32 and 16k on ppc64 */
#ifdef CONFIG_PPC64
#define THREAD_SHIFT 14
#else
#define THREAD_SHIFT 13
#endif
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/config.h> #include <linux/config.h>
#include <linux/cache.h> #include <linux/cache.h>
...@@ -24,7 +34,8 @@ struct thread_info { ...@@ -24,7 +34,8 @@ struct thread_info {
struct task_struct *task; /* main task structure */ struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */ struct exec_domain *exec_domain; /* execution domain */
int cpu; /* cpu we're on */ int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable,
<0 => BUG */
struct restart_block restart_block; struct restart_block restart_block;
/* set by force_successful_syscall_return */ /* set by force_successful_syscall_return */
unsigned char syscall_noerror; unsigned char syscall_noerror;
...@@ -54,9 +65,6 @@ struct thread_info { ...@@ -54,9 +65,6 @@ struct thread_info {
/* thread information allocation */ /* thread information allocation */
#define THREAD_SHIFT 14
#define THREAD_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifdef CONFIG_DEBUG_STACK_USAGE #ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info(tsk) \ #define alloc_thread_info(tsk) \
({ \ ({ \
...@@ -68,7 +76,7 @@ struct thread_info { ...@@ -68,7 +76,7 @@ struct thread_info {
ret; \ ret; \
}) })
#else #else
#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
#endif #endif
#define free_thread_info(ti) kfree(ti) #define free_thread_info(ti) kfree(ti)
#define get_thread_info(ti) get_task_struct((ti)->task) #define get_thread_info(ti) get_task_struct((ti)->task)
...@@ -77,9 +85,11 @@ struct thread_info { ...@@ -77,9 +85,11 @@ struct thread_info {
/* how to get the thread information struct from C */ /* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
{ {
struct thread_info *ti; register unsigned long sp asm("r1");
__asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT));
return ti; /* gcc4, at least, is smart enough to turn this into a single
* rlwinm for ppc32 and clrrdi for ppc64 */
return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
...@@ -122,4 +132,4 @@ static inline struct thread_info *current_thread_info(void) ...@@ -122,4 +132,4 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */ #endif /* _ASM_POWERPC_THREAD_INFO_H */
...@@ -57,7 +57,7 @@ extern unsigned long profile_pc(struct pt_regs *regs); ...@@ -57,7 +57,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define force_successful_syscall_return() \ #define force_successful_syscall_return() \
do { \ do { \
current_thread_info()->local_flags |= _TIFL_FORCE_NOERROR; \ current_thread_info()->syscall_noerror = 1; \
} while(0) } while(0)
/* /*
......
/* thread_info.h: PPC low-level thread information
* adapted from the i386 version by Paul Mackerras
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds and Dave Miller
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
* low level task data.
* If you change this, change the TI_* offsets below to match.
*/
/*
 * Low-level per-thread data, kept at the base of each kernel stack.
 * NOTE: field order/offsets are mirrored by the TI_* constants in
 * asm-offsets and consumed from assembly — do not reorder fields.
 */
struct thread_info {
	struct task_struct *task;		/* main task structure */
	struct exec_domain *exec_domain;	/* execution domain */
	unsigned long flags;			/* low level flags */
	unsigned long local_flags;		/* non-racy flags */
	int cpu;				/* cpu we're on */
	int preempt_count;			/* 0 => preemptable,
						   <0 => BUG */
	struct restart_block restart_block;	/* for restartable syscalls */
};
/*
 * Static initializer for the boot task's thread_info.
 * preempt_count starts at 1 so preemption stays disabled until the
 * scheduler is ready to drop it explicitly.
 */
#define INIT_THREAD_INFO(tsk) \
{ \
	.task = &tsk, \
	.exec_domain = &default_exec_domain, \
	.flags = 0, \
	.local_flags = 0, \
	.cpu = 0, \
	.preempt_count = 1, \
	.restart_block = { \
		.fn = do_no_restart_syscall, \
	}, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/*
* macros/functions for gaining access to the thread information structure
*/
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
	/*
	 * thread_info lives at the bottom of the 8KB (THREAD_SIZE)
	 * kernel stack.  rlwinm rd,r1,0,0,18 keeps bits 0-18 of the
	 * stack pointer (IBM big-endian bit numbering), i.e. clears
	 * the low 13 bits — equivalent to r1 & ~(THREAD_SIZE - 1).
	 * NOTE(review): the mask is hard-coded; it must stay in sync
	 * with the THREAD_SIZE definition below.
	 */
	__asm__("rlwinm %0,1,0,0,18" : "=r"(ti));
	return ti;
}
/* thread information allocation */
/* order-1 allocation: two pages = 8KB, matching THREAD_SIZE below */
#define alloc_thread_info(tsk) ((struct thread_info *) \
				__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti)	free_pages((unsigned long) (ti), 1)
/* pin/unpin the owning task while a reference to its stack is held */
#define get_thread_info(ti)	get_task_struct((ti)->task)
#define put_thread_info(ti)	put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
/*
* Size of kernel stack for each process.
*/
#define THREAD_SIZE 8192 /* 2 pages */
#define PREEMPT_ACTIVE 0x10000000
/*
* thread information flag bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_MEMDIE 5
#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
#define TIF_SECCOMP 7 /* secure computing */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
/*
* Non racy (local) flags bit numbers
*/
#define TIFL_FORCE_NOERROR 0 /* don't return error from current
syscall even if result < 0 */
/* as above, but as bit values */
#define _TIFL_FORCE_NOERROR (1<<TIFL_FORCE_NOERROR)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment