Commit 9ca7810d authored by Russell King

Preempt support and fix ARM build for thread_info changes

parent 3d004984
......@@ -464,6 +464,7 @@ tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
bool 'Power Management support' CONFIG_PM
dep_bool 'Preemptible Kernel (experimental)' CONFIG_PREEMPT $CONFIG_CPU_32 $CONFIG_EXPERIMENTAL
dep_tristate 'Advanced Power Management Emulation' CONFIG_APM $CONFIG_PM
dep_tristate 'RISC OS personality' CONFIG_ARTHUR $CONFIG_CPU_32
......
......@@ -21,6 +21,7 @@
#include <linux/pm.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/smp_lock.h>
#include <asm/byteorder.h>
#include <asm/elf.h>
......@@ -273,3 +274,7 @@ EXPORT_SYMBOL_NOVERS(__down_trylock_failed);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
EXPORT_SYMBOL(get_wchan);
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(kernel_flag);
#endif
......@@ -15,6 +15,7 @@
*/
#include <linux/config.h>
#include "entry-header.S"
#include <asm/thread_info.h>
#ifdef IOC_BASE
......@@ -690,8 +691,7 @@ __dabt_svc: sub sp, sp, #S_FRAME_SIZE
msr cpsr_c, r9
mov r2, sp
bl SYMBOL_NAME(do_DataAbort)
mov r0, #PSR_I_BIT | MODE_SVC
msr cpsr_c, r0
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC
ldr r0, [sp, #S_PSR]
msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
......@@ -705,17 +705,50 @@ __irq_svc: sub sp, sp, #S_FRAME_SIZE
add r4, sp, #S_SP
mov r6, lr
stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_r0
#ifdef CONFIG_PREEMPT
get_thread_info r8
ldr r9, [r8, #TI_PREEMPT] @ get preempt count
add r7, r9, #1 @ increment it
str r7, [r8, #TI_PREEMPT]
#endif
1: get_irqnr_and_base r0, r6, r5, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrsvc ne, lr, 1b
bne do_IRQ
bne asm_do_IRQ
#ifdef CONFIG_PREEMPT
ldr r0, [r8, #TI_FLAGS] @ get flags
tst r0, #_TIF_NEED_RESCHED
ldrne r6, .LCirq_stat
blne svc_preempt
preempt_return:
ldr r0, [r8, #TI_PREEMPT] @ read preempt value
teq r0, r7
strne r0, [r0, -r0] @ bug()
str r9, [r8, #TI_PREEMPT] @ restore preempt count
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
svc_preempt: teq r9, #0
movne pc, lr
ldr r0, [r6, #4] @ local_irq_count
ldr r1, [r6, #8] @ local_bh_count
adds r0, r0, r1
movne pc, lr
1: set_cpsr_c r0, #MODE_SVC @ enable IRQs
bl SYMBOL_NAME(preempt_schedule)
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC @ disable IRQs
ldr r0, [r8, #TI_FLAGS]
tst r0, #_TIF_NEED_RESCHED
bne 1b
b preempt_return
#endif
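The CONFIG_PREEMPT additions above bump TI_PREEMPT on SVC-mode IRQ entry and, on the way out, call preempt_schedule() only when the saved count was zero and the irq_stat words (local_irq_count at offset 4, local_bh_count at offset 8) are both zero, enabling IRQs around the call and looping while TIF_NEED_RESCHED stays set. The following is a minimal userspace C model of that control flow; the thread_info and irq_cpustat layouts are simplified stand-ins and preempt_schedule() is stubbed, so treat it as a sketch of the logic, not the kernel code.

#include <stdio.h>

#define _TIF_NEED_RESCHED (1 << 2)

struct thread_info { unsigned long flags; int preempt_count; };
struct irq_cpustat { unsigned int softirq_pending, irq_count, bh_count; };

static struct thread_info ti;
static struct irq_cpustat irq_stat;

static void preempt_schedule(void)
{
    /* stub: the real call reschedules and clears the request */
    ti.flags &= ~_TIF_NEED_RESCHED;
    puts("preempt_schedule()");
}

static void irq_svc(void)
{
    int saved = ti.preempt_count++;        /* entry: bump TI_PREEMPT */

    /* ... the get_irqnr_and_base / asm_do_IRQ loop runs here ... */

    if ((ti.flags & _TIF_NEED_RESCHED) &&
        saved == 0 &&                      /* teq r9, #0 */
        irq_stat.irq_count + irq_stat.bh_count == 0) {
        do {
            /* IRQs are enabled around the call, as set_cpsr_c does */
            preempt_schedule();
        } while (ti.flags & _TIF_NEED_RESCHED);
    }
    ti.preempt_count = saved;              /* restore TI_PREEMPT before return */
}

int main(void)
{
    ti.flags = _TIF_NEED_RESCHED;
    irq_svc();                             /* schedules exactly once */
    return 0;
}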
.align 5
__und_svc: sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ save r0 - r12
......@@ -733,8 +766,7 @@ __und_svc: sub sp, sp, #S_FRAME_SIZE
mov r1, sp @ struct pt_regs *regs
bl SYMBOL_NAME(do_undefinstr)
1: mov r0, #PSR_I_BIT | MODE_SVC
msr cpsr_c, r0
1: set_cpsr_c r0, #PSR_I_BIT | MODE_SVC
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
msr spsr, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
......@@ -755,8 +787,7 @@ __pabt_svc: sub sp, sp, #S_FRAME_SIZE
mov r0, r2 @ address (pc)
mov r1, sp @ regs
bl SYMBOL_NAME(do_PrefetchAbort) @ call abort handler
mov r0, #PSR_I_BIT | MODE_SVC
msr cpsr_c, r0
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC
ldr r0, [sp, #S_PSR]
msr spsr, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
......@@ -769,6 +800,9 @@ __pabt_svc: sub sp, sp, #S_FRAME_SIZE
.LCprocfns: .word SYMBOL_NAME(processor)
#endif
.LCfp: .word SYMBOL_NAME(fp_enter)
#ifdef CONFIG_PREEMPT
.LCirq_stat: .word SYMBOL_NAME(irq_stat)
#endif
irq_prio_table
......@@ -793,8 +827,7 @@ __dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
#else
bl cpu_data_abort
#endif
mov r2, #MODE_SVC
msr cpsr_c, r2 @ Enable interrupts
set_cpsr_c r2, #MODE_SVC @ Enable interrupts
mov r2, sp
adrsvc al, lr, ret_from_exception
b SYMBOL_NAME(do_DataAbort)
......@@ -809,15 +842,29 @@ __irq_usr: sub sp, sp, #S_FRAME_SIZE
stmdb r8, {sp, lr}^
alignment_trap r4, r7, __temp_irq
zero_fp
#ifdef CONFIG_PREEMPT
get_thread_info r8
ldr r9, [r8, #TI_PREEMPT] @ get preempt count
add r7, r9, #1 @ increment it
str r7, [r8, #TI_PREEMPT]
#endif
1: get_irqnr_and_base r0, r6, r5, lr
movne r1, sp
adrsvc ne, lr, 1b
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
bne do_IRQ
bne asm_do_IRQ
#ifdef CONFIG_PREEMPT
ldr r0, [r8, #TI_PREEMPT]
teq r0, r7
strne r0, [r0, -r0]
str r9, [r8, #TI_PREEMPT]
mov tsk, r8
#else
get_thread_info tsk
#endif
mov why, #0
get_current_task tsk
b ret_to_user
.align 5
......@@ -833,15 +880,15 @@ __und_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
adrsvc al, r9, ret_from_exception @ r9 = normal FP return
adrsvc al, lr, fpundefinstr @ lr = undefined instr return
call_fpe: get_current_task r10
call_fpe: get_thread_info r10 @ get current thread
ldr r4, [r10, #TI_TASK] @ get current task
mov r8, #1
strb r8, [r10, #TSK_USED_MATH] @ set current->used_math
strb r8, [r4, #TSK_USED_MATH] @ set current->used_math
ldr r4, .LCfp
add r10, r10, #TSS_FPESAVE @ r10 = workspace
add r10, r10, #TI_FPSTATE @ r10 = workspace
ldr pc, [r4] @ Call FP module USR entry point
fpundefinstr: mov r0, #MODE_SVC
msr cpsr_c, r0 @ Enable interrupts
fpundefinstr: set_cpsr_c r0, #MODE_SVC @ Enable interrupts
mov r0, lr
mov r1, sp
adrsvc al, lr, ret_from_exception
......@@ -857,8 +904,7 @@ __pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
stmdb r8, {sp, lr}^ @ Save sp_usr lr_usr
alignment_trap r4, r7, __temp_abt
zero_fp
mov r0, #MODE_SVC
msr cpsr_c, r0 @ Enable interrupts
set_cpsr_c r0, #MODE_SVC @ Enable interrupts
mov r0, r5 @ address (pc)
mov r1, sp @ regs
bl SYMBOL_NAME(do_PrefetchAbort) @ call abort handler
......@@ -867,7 +913,7 @@ __pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
* This is the return code to user mode for abort handlers
*/
ENTRY(ret_from_exception)
get_current_task tsk
get_thread_info tsk
mov why, #0
b ret_to_user
......@@ -877,16 +923,16 @@ ENTRY(fp_enter)
.text
/*
* Register switch for ARMv3 and ARMv4 processors
* r0 = previous, r1 = next, return previous.
* r0 = previous thread_info, r1 = next thread_info, return previous.
* previous and next are guaranteed not to be the same.
*/
ENTRY(__switch_to)
stmfd sp!, {r4 - sl, fp, lr} @ Store most regs on stack
mrs ip, cpsr
str ip, [sp, #-4]! @ Save cpsr_SVC
str sp, [r0, #TSS_SAVE] @ Save sp_SVC
ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
ldr r2, [r1, #TSS_DOMAIN]
str sp, [r0, #TI_CPU_SAVE] @ Save sp_SVC
ldr sp, [r1, #TI_CPU_SAVE] @ Get saved sp_SVC
ldr r2, [r1, #TI_CPU_DOMAIN]
ldr ip, [sp], #4
mcr p15, 0, r2, c3, c0 @ Set domain register
msr spsr, ip @ Save tasks CPSR into SPSR for this return
......
......@@ -9,6 +9,7 @@
*/
#include <linux/config.h>
#include "entry-header.S"
#include <asm/thread_info.h>
/*
* We rely on the fact that R0 is at the bottom of the stack (due to
......@@ -34,39 +35,42 @@ ENTRY(__do_softirq)
* stack.
*/
ret_fast_syscall:
#error ldr r1, [tsk, #TSK_NEED_RESCHED]
#error ldr r2, [tsk, #TSK_SIGPENDING]
teq r1, #0 @ need_resched || sigpending
teqeq r2, #0
bne slow
set_cpsr_c r1, #PSR_I_BIT | MODE_SVC @ disable interrupts
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne ret_fast_work
fast_restore_user_regs
/*
* Ok, we need to do extra processing, enter the slow path.
*/
slow: str r0, [sp, #S_R0+S_OFF]! @ returned r0
b 1f
ret_fast_work:
str r0, [sp, #S_R0+S_OFF]! @ returned r0
b work_pending
work_resched:
bl SYMBOL_NAME(schedule)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
*/
reschedule:
bl SYMBOL_NAME(schedule)
ENTRY(ret_to_user)
ret_slow_syscall:
#error ldr r1, [tsk, #TSK_NEED_RESCHED]
#error ldr r2, [tsk, #TSK_SIGPENDING]
1: teq r1, #0 @ need_resched => schedule()
bne reschedule
teq r2, #0 @ sigpending => do_signal()
blne __do_signal
set_cpsr_c r1, #PSR_I_BIT | MODE_SVC
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
beq no_work_pending
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
blne __do_notify_resume
no_work_pending:
restore_user_regs
__do_signal:
mov r0, #0 @ NULL 'oldset'
mov r1, sp @ 'regs'
__do_notify_resume:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
#error b SYMBOL_NAME(do_signal) @ note the bl above sets lr
b SYMBOL_NAME(do_notify_resume) @ note the bl above sets lr
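Both return paths converge on work_pending: the fast path branches there when any low flag bit is set, and work_resched falls back through ret_to_user after schedule(), re-checking the flags. A hedged C rendering of that loop, with interrupt masking elided and the kernel calls stubbed:

#include <stdio.h>

#define _TIF_NOTIFY_RESUME (1 << 0)
#define _TIF_SIGPENDING    (1 << 1)
#define _TIF_NEED_RESCHED  (1 << 2)
#define _TIF_WORK_MASK     0x000000ff

static unsigned long ti_flags = _TIF_NEED_RESCHED | _TIF_SIGPENDING;

static void schedule(void)         { ti_flags &= ~_TIF_NEED_RESCHED; puts("schedule()"); }
static void do_notify_resume(void) { ti_flags &= ~_TIF_SIGPENDING;   puts("do_notify_resume()"); }

static void ret_to_user(void)
{
    for (;;) {
        /* the asm disables IRQs before sampling TI_FLAGS */
        unsigned long flags = ti_flags;

        if (!(flags & _TIF_WORK_MASK))
            break;                          /* no_work_pending */
        if (flags & _TIF_NEED_RESCHED) {
            schedule();                     /* work_resched: loop and re-check */
            continue;
        }
        if (flags & (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING))
            do_notify_resume();
        break;                              /* falls through to the restore */
    }
    puts("restore_user_regs");
}

int main(void) { ret_to_user(); return 0; }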
/*
* This is how we return from a fork. __switch_to will be calling us
......@@ -75,14 +79,14 @@ __do_signal:
*/
ENTRY(ret_from_fork)
bl SYMBOL_NAME(schedule_tail)
get_current_task tsk
ldr ip, [tsk, #TSK_PTRACE] @ check for syscall tracing
get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1
tst ip, #PT_TRACESYS @ are we tracing syscalls?
tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
#error bl SYMBOL_NAME(syscall_trace)
bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
......@@ -134,12 +138,12 @@ ENTRY(vector_swi)
str r4, [sp, #-S_OFF]! @ push fifth arg
get_current_task tsk
ldr ip, [tsk, #TSK_PTRACE] @ check for syscall tracing
get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
bic scno, scno, #0xff000000 @ mask off SWI op-code
eor scno, scno, #OS_NUMBER << 20 @ check OS number
adr tbl, sys_call_table @ load syscall table pointer
tst ip, #PT_TRACESYS @ are we tracing syscalls?
tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
bne __sys_trace
adrsvc al, lr, ret_fast_syscall @ return address
......@@ -160,7 +164,7 @@ ENTRY(vector_swi)
__sys_trace:
add r1, sp, #S_OFF
mov r0, #0 @ trace entry [IP = 0]
#error bl SYMBOL_NAME(syscall_trace)
bl SYMBOL_NAME(syscall_trace)
adrsvc al, lr, __sys_trace_return @ return address
add r1, sp, #S_R0 + S_OFF @ pointer to regs
......@@ -173,7 +177,7 @@ __sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
#error bl SYMBOL_NAME(syscall_trace)
bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
.align 5
......
......@@ -113,7 +113,7 @@
msr cpsr_c, \temp
.endm
.macro get_current_task, rd
.macro get_thread_info, rd
mov \rd, sp, lsr #13
mov \rd, \rd, lsl #13
.endm
......@@ -171,7 +171,7 @@
.macro initialise_traps_extra
.endm
.macro get_current_task, rd
.macro get_thread_info, rd
mov \rd, sp, lsr #13
mov \rd, \rd, lsl #13
.endm
......@@ -197,10 +197,10 @@
*
* We must set at least "tsk" and "why" when calling ret_with_reschedule.
*/
scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current task
scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current thread_info
/*
* Get the system call number.
......@@ -217,3 +217,12 @@ tsk .req r9 @ current task
#endif
.endm
.macro set_cpsr_c, reg, mode
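@ Two encodings: move \mode into \reg and msr the register (selected by
@ the "#if 1" below), or msr the operand directly (currently disabled).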
#if 1
mov \reg, \mode
msr cpsr_c, \reg
#else
msr cpsr_c, \mode
#endif
.endm
......@@ -149,7 +149,7 @@ __switch_data: .long __mmap_switched
.long SYMBOL_NAME(processor_id)
.long SYMBOL_NAME(__machine_arch_type)
.long SYMBOL_NAME(cr_alignment)
.long SYMBOL_NAME(init_task_union)+8192
.long SYMBOL_NAME(init_thread_union)+8192
.type __ret, %function
__ret: ldr lr, __switch_data
......
......@@ -16,7 +16,7 @@ static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
/*
* Initial task structure.
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
......@@ -25,5 +25,13 @@ struct mm_struct init_mm = INIT_MM(init_mm);
*
* The things we do for performance..
*/
union task_union init_task_union __attribute__((__section__(".init.task"))) =
{ INIT_TASK(init_task_union.task) };
union thread_union init_thread_union
__attribute__((__section__(".init.task"))) =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
......@@ -19,6 +19,7 @@
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
......@@ -27,6 +28,7 @@
#include <asm/system.h>
#include <asm/io.h>
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
/*
......@@ -83,9 +85,7 @@ void (*pm_power_off)(void);
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
init_idle();
current->nice = 20;
preempt_disable();
while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
......@@ -228,51 +228,61 @@ void show_fpregs(struct user_fp *regs)
/*
* Task structure and kernel stack allocation.
*/
static struct task_struct *task_struct_head;
static unsigned int nr_task_struct;
static unsigned long *thread_info_head;
static unsigned int nr_thread_info;
#ifdef CONFIG_CPU_32
#define EXTRA_TASK_STRUCT 4
#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
#else
extern unsigned long get_page_8k(int priority);
extern void free_page_8k(unsigned long page);
#define EXTRA_TASK_STRUCT 0
#define ll_alloc_task_struct() ((struct task_struct *)get_page_8k(GFP_KERNEL))
#define ll_free_task_struct(p) free_page_8k((unsigned long)(p))
#endif
struct task_struct *alloc_task_struct(void)
struct thread_info *alloc_thread_info(void)
{
struct task_struct *tsk;
struct thread_info *thread = NULL;
if (EXTRA_TASK_STRUCT)
tsk = task_struct_head;
else
tsk = NULL;
if (EXTRA_TASK_STRUCT) {
unsigned long *p = thread_info_head;
if (tsk) {
task_struct_head = tsk->next_task;
nr_task_struct -= 1;
} else
tsk = ll_alloc_task_struct();
if (p) {
thread_info_head = (unsigned long *)p[0];
nr_thread_info -= 1;
}
thread = (struct thread_info *)p;
}
if (!thread)
thread = ll_alloc_task_struct();
#ifdef CONFIG_SYSRQ
/*
* The stack must be cleared if you want SYSRQ-T to
* give sensible stack usage information
*/
if (tsk) {
char *p = (char *)tsk;
if (thread) {
char *p = (char *)thread;
memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
}
#endif
return tsk;
return thread;
}
void __free_task_struct(struct task_struct *p)
void free_thread_info(struct thread_info *thread)
{
if (EXTRA_TASK_STRUCT && nr_task_struct < EXTRA_TASK_STRUCT) {
p->next_task = task_struct_head;
task_struct_head = p;
nr_task_struct += 1;
if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
unsigned long *p = (unsigned long *)thread;
p[0] = (unsigned long)thread_info_head;
thread_info_head = p;
nr_thread_info += 1;
} else
ll_free_task_struct(p);
ll_free_task_struct(thread);
}
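alloc_thread_info()/free_thread_info() keep up to EXTRA_TASK_STRUCT freed 8KB stack blocks on a singly linked list threaded through the first word of each block, avoiding an order-1 page allocation on every fork. A userspace sketch of the same freelist, with malloc/free standing in for ll_alloc_task_struct()/ll_free_task_struct():

#include <stdio.h>
#include <stdlib.h>

#define EXTRA_TASK_STRUCT 4
#define BLOCK_SIZE        8192            /* two pages, as on CONFIG_CPU_32 */

static unsigned long *thread_info_head;
static unsigned int nr_thread_info;

static void *alloc_block(void)
{
    unsigned long *p = thread_info_head;

    if (p) {                              /* pop a cached block */
        thread_info_head = (unsigned long *)p[0];
        nr_thread_info -= 1;
        return p;
    }
    return malloc(BLOCK_SIZE);            /* ll_alloc_task_struct() stand-in */
}

static void free_block(void *block)
{
    if (nr_thread_info < EXTRA_TASK_STRUCT) {
        unsigned long *p = block;
        p[0] = (unsigned long)thread_info_head;   /* push onto the list */
        thread_info_head = p;
        nr_thread_info += 1;
    } else {
        free(block);                      /* ll_free_task_struct() stand-in */
    }
}

int main(void)
{
    void *a = alloc_block();
    free_block(a);
    printf("cached blocks: %u\n", nr_thread_info);   /* prints 1 */
    return 0;
}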
/*
......@@ -284,10 +294,13 @@ void exit_thread(void)
void flush_thread(void)
{
memset(&current->thread.debug, 0, sizeof(struct debug_info));
memset(&current->thread.fpstate, 0, sizeof(union fp_state));
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
memset(&thread->fpstate, 0, sizeof(union fp_state));
current->used_math = 0;
current->flags &= ~PF_USEDFPU;
}
void release_thread(struct task_struct *dead_task)
......@@ -300,21 +313,19 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs * childregs;
struct context_save_struct * save;
struct pt_regs *childregs;
struct cpu_context_save *save;
atomic_set(&p->thread.refcount, 1);
childregs = ((struct pt_regs *)((unsigned long)p + 8192)) - 1;
childregs = ((struct pt_regs *)((unsigned long)p->thread_info + THREAD_SIZE)) - 1;
*childregs = *regs;
childregs->ARM_r0 = 0;
childregs->ARM_sp = esp;
save = ((struct context_save_struct *)(childregs)) - 1;
save = ((struct cpu_context_save *)(childregs)) - 1;
*save = INIT_CSS;
save->pc |= (unsigned long)ret_from_fork;
p->thread.save = save;
p->thread_info->cpu_context = save;
return 0;
}
......@@ -324,10 +335,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
*/
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
if (current->used_math)
memcpy(fp, &current->thread.fpstate.soft, sizeof (*fp));
struct thread_info *thread = current_thread_info();
int used_math = current->used_math;
if (used_math)
memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
return current->used_math;
return used_math;
}
/*
......@@ -405,7 +419,7 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
stack_page = 4096 + (unsigned long)p;
fp = get_css_fp(&p->thread);
fp = thread_saved_fp(p);
do {
if (fp < stack_page || fp > 4092+stack_page)
return 0;
......
......@@ -73,7 +73,7 @@ put_stack_long(struct task_struct *task, int offset, long data)
newregs = *regs;
newregs.uregs[offset] = data;
if (valid_user_regs(&newregs)) {
regs->uregs[offset] = data;
ret = 0;
......@@ -456,9 +456,9 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
if ((unsigned long) data > _NSIG)
break;
if (request == PTRACE_SYSCALL)
child->ptrace |= PT_TRACESYS;
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
child->ptrace &= ~PT_TRACESYS;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* make sure single-step breakpoint is gone. */
__ptrace_cancel_bpt(child);
......@@ -491,7 +491,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
if ((unsigned long) data > _NSIG)
break;
child->thread.debug.nsaved = -1;
child->ptrace &= ~PT_TRACESYS;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* give it a chance to run. */
wake_up_process(child);
......@@ -548,7 +548,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
break;
/* we should check child->used_math here */
ret = __copy_to_user((void *)data, &child->thread.fpstate,
ret = __copy_to_user((void *)data, &child->thread_info->fpstate,
sizeof(struct user_fp)) ? -EFAULT : 0;
break;
......@@ -561,7 +561,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
break;
child->used_math = 1;
ret = __copy_from_user(&child->thread.fpstate, (void *)data,
ret = __copy_from_user(&child->thread_info->fpstate, (void *)data,
sizeof(struct user_fp)) ? -EFAULT : 0;
break;
......@@ -617,7 +617,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = do_ptrace(request, child, addr, data);
out_tsk:
free_task_struct(child);
put_task_struct(child);
out:
unlock_kernel();
return ret;
......@@ -627,8 +627,9 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs)
{
unsigned long ip;
if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
!= (PT_PTRACED|PT_TRACESYS))
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
/*
......
......@@ -38,6 +38,10 @@
#define CONFIG_CMDLINE ""
#endif
#ifdef CONFIG_PREEMPT
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];
......
......@@ -480,6 +480,7 @@ static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
int usig = sig;
int ret;
......@@ -487,8 +488,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
/*
* translate the signal
*/
if (usig < 32 && tsk->exec_domain && tsk->exec_domain->signal_invmap)
usig = tsk->exec_domain->signal_invmap[usig];
if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
usig = thread->exec_domain->signal_invmap[usig];
/*
* Set up the stack frame
......@@ -532,7 +533,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
struct k_sigaction *ka;
siginfo_t info;
......@@ -678,3 +679,10 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
ptrace_set_bpt(current);
return 0;
}
asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
if (thread_flags & _TIF_SIGPENDING)
do_signal(NULL, regs, syscall);
}
......@@ -115,7 +115,7 @@ static void dump_instr(struct pt_regs *regs)
static void dump_stack(struct task_struct *tsk, unsigned long sp)
{
dump_mem("Stack: ", sp - 16, 8192+(unsigned long)tsk);
dump_mem("Stack: ", sp - 16, 8192+(unsigned long)tsk->thread_info);
}
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
......@@ -146,7 +146,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
void show_trace_task(struct task_struct *tsk)
{
if (tsk != current) {
unsigned int fp = tsk->thread.save->fp;
unsigned int fp = thread_saved_fp(tsk);
c_backtrace(fp, 0x10);
}
}
......@@ -304,16 +304,17 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
static int bad_syscall(int n, struct pt_regs *regs)
{
struct thread_info *thread = current_thread_info();
siginfo_t info;
/* You might think just testing `handler' would be enough, but PER_LINUX
* points it to no_lcall7 to catch undercover SVr4 binaries. Gutted.
*/
if (current->personality != PER_LINUX && current->exec_domain->handler) {
if (current->personality != PER_LINUX && thread->exec_domain->handler) {
/* Hand it off to iBCS. The extra parameter and consequent type
* forcing is necessary because of the weird ARM calling convention.
*/
current->exec_domain->handler(n, regs);
thread->exec_domain->handler(n, regs);
return regs->ARM_r0;
}
......
......@@ -27,12 +27,13 @@
* Note also that it is intended that __get_user_bad is not global.
*/
#include <asm/constants.h>
#include <asm/thread_info.h>
.global __get_user_1
__get_user_1:
bic r1, sp, #0x1f00
bic r1, r1, #0x00ff
ldr r1, [r1, #TSK_ADDR_LIMIT]
ldr r1, [r1, #TI_ADDR_LIMIT]
sub r1, r1, #1
cmp r0, r1
1: ldrlsbt r1, [r0]
......@@ -44,7 +45,7 @@ __get_user_1:
__get_user_2:
bic r2, sp, #0x1f00
bic r2, r2, #0x00ff
ldr r2, [r2, #TSK_ADDR_LIMIT]
ldr r2, [r2, #TI_ADDR_LIMIT]
sub r2, r2, #2
cmp r0, r2
2: ldrlsbt r1, [r0], #1
......@@ -62,7 +63,7 @@ __get_user_2:
__get_user_4:
bic r1, sp, #0x1f00
bic r1, r1, #0x00ff
ldr r1, [r1, #TSK_ADDR_LIMIT]
ldr r1, [r1, #TI_ADDR_LIMIT]
sub r1, r1, #4
cmp r0, r1
4: ldrlst r1, [r0]
......@@ -74,7 +75,7 @@ __get_user_4:
__get_user_8:
bic r2, sp, #0x1f00
bic r2, r2, #0x00ff
ldr r2, [r2, #TSK_ADDR_LIMIT]
ldr r2, [r2, #TI_ADDR_LIMIT]
sub r2, r2, #8
cmp r0, r2
5: ldrlst r1, [r0], #4
......
......@@ -27,12 +27,13 @@
* Note also that it is intended that __put_user_bad is not global.
*/
#include <asm/constants.h>
#include <asm/thread_info.h>
.global __put_user_1
__put_user_1:
bic r2, sp, #0x1f00
bic r2, r2, #0x00ff
ldr r2, [r2, #TSK_ADDR_LIMIT]
ldr r2, [r2, #TI_ADDR_LIMIT]
sub r2, r2, #1
cmp r0, r2
1: strlsbt r1, [r0]
......@@ -44,7 +45,7 @@ __put_user_1:
__put_user_2:
bic r2, sp, #0x1f00
bic r2, r2, #0x00ff
ldr r2, [r2, #TSK_ADDR_LIMIT]
ldr r2, [r2, #TI_ADDR_LIMIT]
sub r2, r2, #2
cmp r0, r2
movls r2, r1, lsr #8
......@@ -63,7 +64,7 @@ __put_user_2:
__put_user_4:
bic r2, sp, #0x1f00
bic r2, r2, #0x00ff
ldr r2, [r2, #TSK_ADDR_LIMIT]
ldr r2, [r2, #TI_ADDR_LIMIT]
sub r2, r2, #4
cmp r0, r2
4: strlst r1, [r0]
......@@ -75,7 +76,7 @@ __put_user_4:
__put_user_8:
bic ip, sp, #0x1f00
bic ip, ip, #0x00ff
ldr ip, [ip, #TSK_ADDR_LIMIT]
ldr ip, [ip, #TI_ADDR_LIMIT]
sub ip, ip, #8
cmp r0, ip
5: strlst r1, [r0], #4
......
......@@ -32,23 +32,21 @@
#endif
#define OFF_TSK(n) (unsigned long)&(((struct task_struct *)0)->n)
#define OFF_VMA(n) (unsigned long)&(((struct vm_area_struct *)0)->n)
#define DEFN(name,off) asm("\n#define "name" %0" :: "I" (off))
void func(void)
{
#error DEFN("TSK_SIGPENDING", OFF_TSK(sigpending));
DEFN("TSK_ADDR_LIMIT", OFF_TSK(addr_limit));
#error DEFN("TSK_NEED_RESCHED", OFF_TSK(need_resched));
#error DEFN("TSK_PTRACE", OFF_TSK(ptrace));
DEFN("TSK_USED_MATH", OFF_TSK(used_math));
DEFN("TSK_ACTIVE_MM", OFF_TSK(active_mm));
DEFN("TSS_SAVE", OFF_TSK(thread.save));
DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
DEFN("VMA_VM_MM", OFF_VMA(vm_mm));
DEFN("VMA_VM_FLAGS", OFF_VMA(vm_flags));
#ifdef CONFIG_CPU_32
DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
DEFN("VM_EXEC", VM_EXEC);
#ifdef CONFIG_CPU_32
DEFN("HPTE_TYPE_SMALL", PTE_TYPE_SMALL);
DEFN("HPTE_AP_READ", PTE_AP_READ);
DEFN("HPTE_AP_WRITE", PTE_AP_WRITE);
......
......@@ -301,6 +301,23 @@ static inline unsigned long ffz(unsigned long word)
return k;
}
/*
* __ffs = Find First Set bit in word. Undefined if no bit is set,
* so code should check against 0UL first..
*/
static inline unsigned long __ffs(unsigned long word)
{
int k;
k = 31;
if (word & 0x0000ffff) { k -= 16; word <<= 16; }
if (word & 0x00ff0000) { k -= 8; word <<= 8; }
if (word & 0x0f000000) { k -= 4; word <<= 4; }
if (word & 0x30000000) { k -= 2; word <<= 2; }
if (word & 0x40000000) { k -= 1; }
return k;
}
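__ffs() narrows in on the lowest set bit by halving: each test asks whether a set bit exists in the low half of the remaining window, shifting the word up and reducing k accordingly. A quick self-check against a brute-force expectation (illustrative only; inputs kept within 32 bits):

#include <assert.h>
#include <stdio.h>

static unsigned long __ffs(unsigned long word)     /* copy of the above */
{
    int k = 31;
    if (word & 0x0000ffff) { k -= 16; word <<= 16; }
    if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
    if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
    if (word & 0x30000000) { k -= 2;  word <<= 2;  }
    if (word & 0x40000000) { k -= 1; }
    return k;
}

int main(void)
{
    for (int bit = 0; bit < 32; bit++) {
        /* set an extra high bit to prove the lowest one wins */
        unsigned long w = (1UL << bit) | (1UL << 31);
        assert(__ffs(w) == (unsigned long)bit);
    }
    puts("__ffs ok");
    return 0;
}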
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
......
#ifndef _ASMARM_CURRENT_H
#define _ASMARM_CURRENT_H
#include <asm/thread_info.h>
static inline struct task_struct *get_current(void) __attribute__ (( __const__ ));
static inline struct task_struct *get_current(void)
{
register unsigned long sp asm ("sp");
return (struct task_struct *)(sp & ~0x1fff);
return current_thread_info()->task;
}
#define current (get_current())
......
/*
* linux/include/asm-arm/fpstate.h
*
* Copyright (C) 1995 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_FPSTATE_H
#define __ASM_ARM_FPSTATE_H
#define FP_SIZE 35
struct fp_hard_struct {
unsigned int save[FP_SIZE]; /* as yet undefined */
};
struct fp_soft_struct {
unsigned int save[FP_SIZE]; /* undefined information */
};
union fp_state {
struct fp_hard_struct hard;
struct fp_soft_struct soft;
};
#endif
......@@ -34,6 +34,7 @@ typedef struct {
#define irq_exit(cpu,irq) (local_irq_count(cpu)--)
#define synchronize_irq() do { } while (0)
#define release_irqlock(cpu) do { } while (0)
#else
#error SMP not supported
......
......@@ -42,14 +42,34 @@ static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk, unsigned int cpu)
{
if (prev != next) {
if (prev != next)
cpu_switch_mm(next->pgd, tsk);
clear_bit(cpu, &prev->cpu_vm_mask);
}
set_bit(cpu, &next->cpu_vm_mask);
}
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL,smp_processor_id())
/*
* Find first bit set in a 168-bit bitmap, where the first
* 128 bits are unlikely to be set.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
#error update this function
#endif
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (unlikely(b[3]))
return __ffs(b[3]) + 96;
if (b[4])
return __ffs(b[4]) + MAX_RT_PRIO;
return __ffs(b[5]) + 32 + MAX_RT_PRIO;
}
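sched_find_first_bit() serves the O(1) scheduler's 168-bit priority bitmap (six 32-bit words, only eight bits used in the last): words 0-3 cover the 128 RT priorities and are marked unlikely(), word 4 covers priorities 128-159, and word 5 the final 160-167. A hedged harness reusing the __ffs() shown earlier:

#include <assert.h>
#include <stdio.h>

#define MAX_RT_PRIO 128

static unsigned long __ffs(unsigned long word)     /* as in asm/bitops.h above */
{
    int k = 31;
    if (word & 0x0000ffff) { k -= 16; word <<= 16; }
    if (word & 0x00ff0000) { k -= 8;  word <<= 8;  }
    if (word & 0x0f000000) { k -= 4;  word <<= 4;  }
    if (word & 0x30000000) { k -= 2;  word <<= 2;  }
    if (word & 0x40000000) { k -= 1; }
    return k;
}

static int sched_find_first_bit(unsigned long *b)
{
    if (b[0]) return __ffs(b[0]);
    if (b[1]) return __ffs(b[1]) + 32;
    if (b[2]) return __ffs(b[2]) + 64;
    if (b[3]) return __ffs(b[3]) + 96;
    if (b[4]) return __ffs(b[4]) + MAX_RT_PRIO;
    return __ffs(b[5]) + 32 + MAX_RT_PRIO;
}

int main(void)
{
    unsigned long b[6] = { 0 };

    b[5] = 1UL << 7;                       /* priority 167, least urgent */
    assert(sched_find_first_bit(b) == 167);

    b[4] = 1UL << 0;                       /* priority 128 beats 167 */
    assert(sched_find_first_bit(b) == MAX_RT_PRIO);

    b[0] = 1UL << 3;                       /* RT priority 3 beats everything */
    assert(sched_find_first_bit(b) == 3);

    puts("sched_find_first_bit ok");
    return 0;
}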
#endif
......@@ -57,40 +57,48 @@ static inline pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
preempt_disable();
if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)__pgd_next(ret);
ret[1] = ret[2];
clean_dcache_entry(ret + 1);
pgtable_cache_size--;
}
preempt_enable();
return (pgd_t *)ret;
}
static inline void free_pgd_fast(pgd_t *pgd)
{
preempt_disable();
__pgd_next(pgd) = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
preempt_enable();
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
unsigned long *ret;
preempt_disable();
if((ret = pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)__pte_next(ret);
ret[0] = 0;
clean_dcache_entry(ret);
pgtable_cache_size--;
}
preempt_enable();
return (pte_t *)ret;
}
static inline void free_pte_fast(pte_t *pte)
{
preempt_disable();
__pte_next(pte) = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
preempt_enable();
}
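Each quicklist push and pop above gains a preempt_disable()/preempt_enable() pair: with CONFIG_PREEMPT, a task preempted halfway through updating the global list could otherwise race another task running the same code. A minimal sketch of the pattern, with the __pgd_next() bookkeeping simplified to the block's first word:

#include <assert.h>
#include <stdio.h>

static int preempt_count;                 /* per-thread in the kernel */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; /* resched check here */ }

static unsigned long *pgd_quicklist;

static unsigned long *get_pgd_fast(void)
{
    unsigned long *ret;

    preempt_disable();                    /* list must not change under us */
    if ((ret = pgd_quicklist) != NULL)
        pgd_quicklist = (unsigned long *)ret[0];
    preempt_enable();
    return ret;
}

static void free_pgd_fast(unsigned long *pgd)
{
    preempt_disable();
    pgd[0] = (unsigned long)pgd_quicklist;  /* push */
    pgd_quicklist = pgd;
    preempt_enable();
}

int main(void)
{
    unsigned long slot[2] = { 0, 0 };

    free_pgd_fast(slot);
    assert(get_pgd_fast() == slot);
    assert(preempt_count == 0);
    puts("quicklist bracketing ok");
    return 0;
}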
#else /* CONFIG_NO_PGT_CACHE */
......
......@@ -23,7 +23,7 @@
#define KERNEL_STACK_SIZE 4096
struct context_save_struct {
struct cpu_context_save {
unsigned long r4;
unsigned long r5;
unsigned long r6;
......@@ -35,7 +35,7 @@ struct context_save_struct {
unsigned long pc;
};
#define INIT_CSS (struct context_save_struct){ 0, 0, 0, 0, 0, 0, 0, 0, SVC26_MODE }
#define INIT_CSS (struct cpu_context_save){ 0, 0, 0, 0, 0, 0, 0, 0, SVC26_MODE }
typedef struct {
void (*put_byte)(void); /* Special calling convention */
......@@ -74,14 +74,4 @@ extern uaccess_t uaccess_user, uaccess_kernel;
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
/* Allocation and freeing of basic task resources. */
/*
* NOTE! The task struct and the stack go together
*/
extern unsigned long get_page_8k(int priority);
extern void free_page_8k(unsigned long page);
#define ll_alloc_task_struct() ((struct task_struct *)get_page_8k(GFP_KERNEL))
#define ll_free_task_struct(p) free_page_8k((unsigned long)(p))
#endif
......@@ -22,7 +22,7 @@
#define KERNEL_STACK_SIZE PAGE_SIZE
struct context_save_struct {
struct cpu_context_save {
unsigned long cpsr;
unsigned long r4;
unsigned long r5;
......@@ -35,15 +35,12 @@ struct context_save_struct {
unsigned long pc;
};
#define INIT_CSS (struct context_save_struct){ SVC_MODE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
#define INIT_CSS (struct cpu_context_save){ SVC_MODE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
#define EXTRA_THREAD_STRUCT \
unsigned int domain;
#define EXTRA_THREAD_STRUCT_INIT \
domain: domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT)
#define INIT_EXTRA_THREAD_INFO \
cpu_domain: domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT)
#define start_thread(regs,pc,sp) \
({ \
......@@ -64,11 +61,4 @@ struct context_save_struct {
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1021])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
/* Allocation and freeing of basic task resources. */
/*
* NOTE! The task struct and the stack go together
*/
#define ll_alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
#endif
......@@ -16,8 +16,7 @@
static inline void set_fs (mm_segment_t fs)
{
current->addr_limit = fs;
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
......@@ -26,7 +25,7 @@ static inline void set_fs (mm_segment_t fs)
unsigned long flag, sum; \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current->addr_limit) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; })
......@@ -34,7 +33,7 @@ static inline void set_fs (mm_segment_t fs)
unsigned long flag; \
__asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current->addr_limit), "r" (addr) \
: "0" (current_thread_info()->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
......
......@@ -17,23 +17,6 @@
*/
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
#define FP_SIZE 35
struct fp_hard_struct {
unsigned int save[FP_SIZE]; /* as yet undefined */
};
struct fp_soft_struct {
unsigned int save[FP_SIZE]; /* undefined information */
};
union fp_state {
struct fp_hard_struct hard;
struct fp_soft_struct soft;
};
typedef unsigned long mm_segment_t; /* domain register */
#ifdef __KERNEL__
#define EISA_bus 0
......@@ -54,37 +37,15 @@ struct debug_info {
};
struct thread_struct {
atomic_t refcount;
/* fault info */
unsigned long address;
unsigned long trap_no;
unsigned long error_code;
/* floating point */
union fp_state fpstate;
/* debugging */
struct debug_info debug;
/* context info */
struct context_save_struct *save;
EXTRA_THREAD_STRUCT
};
#define INIT_THREAD { \
refcount: ATOMIC_INIT(1), \
EXTRA_THREAD_STRUCT_INIT \
}
/*
* Return saved PC of a blocked thread.
*/
static inline unsigned long thread_saved_pc(struct thread_struct *t)
{
return t->save ? pc_pointer(t->save->pc) : 0;
}
static inline unsigned long get_css_fp(struct thread_struct *t)
{
return t->save ? t->save->fp : 0;
}
#define INIT_THREAD { }
/* Forward declaration, a strange C thing */
struct task_struct;
......@@ -98,21 +59,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE (8192)
extern struct task_struct *alloc_task_struct(void);
extern void __free_task_struct(struct task_struct *);
#define get_task_struct(p) atomic_inc(&(p)->thread.refcount)
#define free_task_struct(p) \
do { \
if (atomic_dec_and_test(&(p)->thread.refcount)) \
__free_task_struct((p)); \
} while (0)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define cpu_relax() do { } while (0)
/*
* Create a new kernel thread
......
......@@ -3,31 +3,34 @@
*
* Default SMP lock implementation
*/
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#else
#define kernel_locked() spin_is_locked(&kernel_flag)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
......@@ -40,8 +43,14 @@ do { \
*/
static inline void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif
}
static inline void unlock_kernel(void)
......
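With CONFIG_PREEMPT, kernel_locked() reports the preempt count and lock_kernel() changes its nesting dance: lock_depth starts at -1, so the preemptible variant takes the spinlock only on the very first acquisition and counts afterwards. The unlock side is not shown in this hunk, so the model below assumes the usual counterpart that drops the lock when the depth returns to -1:

#include <assert.h>
#include <stdio.h>

#define CONFIG_PREEMPT 1

static int lock_depth = -1;               /* task_struct initial value */
static int kernel_flag_held;

static void spin_lock(void)   { assert(!kernel_flag_held); kernel_flag_held = 1; }
static void spin_unlock(void) { kernel_flag_held = 0; }

static void lock_kernel(void)
{
#if CONFIG_PREEMPT
    if (lock_depth == -1)                 /* only the first entry locks */
        spin_lock();
    ++lock_depth;
#else
    if (!++lock_depth)                    /* -1 -> 0: first entry locks */
        spin_lock();
#endif
}

static void unlock_kernel(void)           /* assumed counterpart */
{
    if (--lock_depth < 0)                 /* last exit drops the lock */
        spin_unlock();
}

int main(void)
{
    lock_kernel();
    lock_kernel();                        /* recursion: no second spin_lock */
    unlock_kernel();
    unlock_kernel();
    assert(lock_depth == -1 && !kernel_flag_held);
    puts("BKL nesting ok");
    return 0;
}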
......@@ -5,20 +5,22 @@
#include <asm/hardirq.h>
#define __cpu_bh_enable(cpu) \
do { barrier(); local_bh_count(cpu)--; } while (0)
do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
#define cpu_bh_disable(cpu) \
do { local_bh_count(cpu)++; barrier(); } while (0)
do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#define local_bh_enable() \
#define _local_bh_enable() \
do { \
unsigned int *ptr = &local_bh_count(smp_processor_id()); \
if (!--*ptr && ptr[-2]) \
__asm__("bl%? __do_softirq": : : "lr");/* out of line */\
} while (0)
#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
#endif /* __ASM_SOFTIRQ_H */
......@@ -6,6 +6,8 @@
#include <linux/config.h>
#include <linux/kernel.h>
struct thread_info;
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
......@@ -48,12 +50,13 @@ extern asmlinkage void __backtrace(void);
* `prev' will never be the same as `next'.
* The `mb' is to tell GCC not to cache `current' across this call.
*/
extern struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next);
struct thread_info;
extern struct task_struct *__switch_to(struct thread_info *, struct thread_info *);
#define switch_to(prev,next,last) \
do { \
last = __switch_to(prev,next); \
mb(); \
#define switch_to(prev,next,last) \
do { \
last = __switch_to(prev->thread_info,next->thread_info); \
mb(); \
} while (0)
/* For spinlocks etc */
......
/*
* linux/include/asm-arm/thread_info.h
*
* Copyright (C) 2002 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
struct task_struct;
struct exec_domain;
#include <asm/fpstate.h>
#include <asm/proc/processor.h>
#include <asm/ptrace.h>
#include <asm/types.h>
typedef unsigned long mm_segment_t; /* domain register */
/*
* low level task data that entry.S needs immediate access to.
*/
struct thread_info {
__u32 flags; /* low level flags */
__s32 preempt_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
__u32 cpu; /* cpu */
struct cpu_context_save *cpu_context; /* cpu context */
__u32 cpu_domain; /* cpu domain */
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
union fp_state fpstate;
};
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
preempt_count: 0, \
addr_limit: KERNEL_DS, \
INIT_EXTRA_THREAD_INFO, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute__ (( __const__ ));
static inline struct thread_info *current_thread_info(void)
{
register unsigned long sp asm ("sp");
return (struct thread_info *)(sp & ~0x1fff);
}
#define THREAD_SIZE (8192)
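current_thread_info() works because each task's thread_info shares an 8KB, 8KB-aligned block with its kernel stack; rounding the stack pointer down to an 8KB boundary therefore lands on the thread_info, which is exactly what the get_thread_info assembler macro does with its lsr #13 / lsl #13 pair. A small userspace demonstration of the arithmetic (illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192

int main(void)
{
    void *block = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
    assert(block);

    /* pretend sp points somewhere inside the kernel stack */
    uintptr_t sp = (uintptr_t)block + 5000;

    /* sp & ~0x1fff is the same as (sp >> 13) << 13 */
    assert((sp & ~(uintptr_t)0x1fff) == (sp >> 13) << 13);
    assert((void *)(sp & ~(uintptr_t)0x1fff) == block);

    puts("thread_info found at stack base");
    free(block);
    return 0;
}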
extern struct thread_info *alloc_thread_info(void);
extern void free_thread_info(struct thread_info *);
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
static inline unsigned long __thread_saved_pc(struct thread_info *thread)
{
struct cpu_context_save *context = thread->cpu_context;
return context ? pc_pointer(context->pc) : 0;
}
static inline unsigned long __thread_saved_fp(struct thread_info *thread)
{
struct cpu_context_save *context = thread->cpu_context;
return context ? context->fp : 0;
}
#define thread_saved_pc(tsk) __thread_saved_pc((tsk)->thread_info)
#define thread_saved_fp(tsk) __thread_saved_fp((tsk)->thread_info)
#else /* !__ASSEMBLY__ */
#define TI_FLAGS 0
#define TI_PREEMPT 4
#define TI_ADDR_LIMIT 8
#define TI_CPU 12
#define TI_CPU_SAVE 16
#define TI_CPU_DOMAIN 20
#define TI_TASK 24
#define TI_EXEC_DOMAIN 28
#define TI_FPSTATE 32
#endif
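The TI_* offsets in the assembly branch are maintained by hand against the C struct above, so any field reordering must update both. An illustrative compile-time check (modern C, not part of the patch) using a 32-bit mock of the layout, with pointers and mm_segment_t replaced by 4-byte stand-ins as on ARM:

#include <stddef.h>
#include <stdint.h>

struct thread_info_mock {
    uint32_t flags;          /* TI_FLAGS       = 0  */
    int32_t  preempt_count;  /* TI_PREEMPT     = 4  */
    uint32_t addr_limit;     /* TI_ADDR_LIMIT  = 8  */
    uint32_t cpu;            /* TI_CPU         = 12 */
    uint32_t cpu_context;    /* TI_CPU_SAVE    = 16 (pointer on ARM) */
    uint32_t cpu_domain;     /* TI_CPU_DOMAIN  = 20 */
    uint32_t task;           /* TI_TASK        = 24 (pointer on ARM) */
    uint32_t exec_domain;    /* TI_EXEC_DOMAIN = 28 (pointer on ARM) */
    uint32_t fpstate[35];    /* TI_FPSTATE     = 32 (union fp_state) */
};

_Static_assert(offsetof(struct thread_info_mock, flags)         == 0,  "TI_FLAGS");
_Static_assert(offsetof(struct thread_info_mock, preempt_count) == 4,  "TI_PREEMPT");
_Static_assert(offsetof(struct thread_info_mock, addr_limit)    == 8,  "TI_ADDR_LIMIT");
_Static_assert(offsetof(struct thread_info_mock, cpu)           == 12, "TI_CPU");
_Static_assert(offsetof(struct thread_info_mock, cpu_context)   == 16, "TI_CPU_SAVE");
_Static_assert(offsetof(struct thread_info_mock, cpu_domain)    == 20, "TI_CPU_DOMAIN");
_Static_assert(offsetof(struct thread_info_mock, task)          == 24, "TI_TASK");
_Static_assert(offsetof(struct thread_info_mock, exec_domain)   == 28, "TI_EXEC_DOMAIN");
_Static_assert(offsetof(struct thread_info_mock, fpstate)       == 32, "TI_FPSTATE");

int main(void) { return 0; }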
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_NOTIFY_RESUME - resumption notification requested
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_USED_FPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
#define TIF_NOTIFY_RESUME 0
#define TIF_SIGPENDING 1
#define TIF_NEED_RESCHED 2
#define TIF_SYSCALL_TRACE 8
#define TIF_USED_FPU 16
#define TIF_POLLING_NRFLAG 17
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_USED_FPU (1 << TIF_USED_FPU)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
/*
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK 0x000000ff
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
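_TIF_WORK_MASK deliberately spans only bits 0-7, so the single tst in the entry-common.S return paths sees NOTIFY_RESUME, SIGPENDING and NEED_RESCHED but not SYSCALL_TRACE (bit 8), which vector_swi tests separately on the way in. A quick illustrative check of that layout:

#include <assert.h>
#include <stdio.h>

#define TIF_NOTIFY_RESUME  0
#define TIF_SIGPENDING     1
#define TIF_NEED_RESCHED   2
#define TIF_SYSCALL_TRACE  8

#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING    (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED  (1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_WORK_MASK     0x000000ff

int main(void)
{
    unsigned work = _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED;

    assert((work & _TIF_WORK_MASK) == work);            /* one tst catches all */
    assert((_TIF_SYSCALL_TRACE & _TIF_WORK_MASK) == 0); /* tracing excluded */

    puts("TIF layout ok");
    return 0;
}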
......@@ -32,7 +32,7 @@ struct exception_table_entry
extern unsigned long search_exception_table(unsigned long);
#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
#define get_fs() (current_thread_info()->addr_limit)
#define segment_eq(a,b) ((a) == (b))
#include <asm/proc/uaccess.h>
......