Commit 0ff21ea7 authored by Geert Uytterhoeven, committed by Linus Torvalds

[PATCH] M68k update (part 42)

M68k task and thread updates: move the per-task work flags into a new struct task_work inside thread_struct, split the initial task out of the old task/stack union (separate init_task and init_thread_union), and add an m68k <asm/thread_info.h> that emulates the generic TIF_* interface with byte flags. entry.S, ptrace and signal delivery are converted to the new layout.
parent b066fff8
@@ -381,7 +381,7 @@ fpsp_done:
.Lnotkern:
SAVE_ALL_INT
GET_CURRENT(%d0)
tstl %curptr@(TASK_NEEDRESCHED)
tstb %curptr@(TASK_NEEDRESCHED)
jne ret_from_exception | deliver signals,
| reschedule etc..
RESTORE_ALL
@@ -75,7 +75,7 @@ _060_isp_done:
.Lnotkern:
SAVE_ALL_INT
GET_CURRENT(%d0)
tstl %curptr@(TASK_NEEDRESCHED)
tstb %curptr@(TASK_NEEDRESCHED)
jne ret_from_exception | deliver signals,
| reschedule etc..
RESTORE_ALL
@@ -68,21 +68,11 @@ ENTRY(trap)
addql #4,%sp
jra ret_from_exception
ENTRY(reschedule)
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
pea ret_from_exception
jmp schedule
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| Theoretically only needed on SMP, but let's watch
| what happens in schedule_tail() in future...
| schedule_tail is only used with CONFIG_SMP
ENTRY(ret_from_fork)
movel %d1,%sp@-
#ifdef CONFIG_SMP
jsr schedule_tail
addql #4,%sp
#endif
jra ret_from_exception
badsys:
@@ -128,38 +118,54 @@ ENTRY(system_call)
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
btst #PT_TRACESYS_BIT,%curptr@(TASK_PTRACE+PT_TRACESYS_OFF)
tstb %curptr@(TASK_SYSCALL_TRACE)
jne do_trace
cmpl #NR_syscalls,%d0
jcc badsys
jbsr @(sys_call_table,%d0:l:4)@(0)
movel %d0,%sp@(PT_D0) | save the return value
|oriw #0x0700,%sr
movel %curptr@(TASK_WORK),%d0
jne syscall_exit_work
1: RESTORE_ALL
syscall_exit_work:
btst #5,%sp@(PT_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
tstw %d0
jeq do_signal_return
tstb %d0
jne do_delayed_trace
pea resume_userspace
jmp schedule
ret_from_exception:
btst #5,%sp@(PT_SR) | check if returning to kernel
bnes 2f | if so, skip resched, signals
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
| heavy interrupt load
andw #ALLOWINT,%sr
tstl %curptr@(TASK_NEEDRESCHED)
jne reschedule
#if 0
cmpl #task,%curptr | task[0] cannot have signals
jeq 2f
#endif
| check for delayed trace
bclr #PT_DTRACE_BIT,%curptr@(TASK_PTRACE+PT_DTRACE_OFF)
jne do_delayed_trace
5:
tstl %curptr@(TASK_STATE) | state
jne reschedule
tstl %curptr@(TASK_SIGPENDING)
jne Lsignal_return
2: RESTORE_ALL
resume_userspace:
movel %curptr@(TASK_WORK),%d0
lsrl #8,%d0
jne exit_work
1: RESTORE_ALL
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
tstb %d0
jeq do_signal_return
pea resume_userspace
jmp schedule
Lsignal_return:
do_signal_return:
|andw #ALLOWINT,%sr
subql #4,%sp | dummy return address
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
@@ -168,7 +174,7 @@ Lsignal_return:
addql #8,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
RESTORE_ALL
jbra resume_userspace
do_delayed_trace:
bclr #7,%sp@(PT_SR) | clear trace bit in SR
@@ -178,13 +184,13 @@ do_delayed_trace:
jbsr send_sig
addql #8,%sp
addql #4,%sp
jra 5b
jbra resume_userspace
#if 0
#if CONFIG_AMIGA
ami_inthandler:
addql #1,irq_stat+8 | local_irq_count
addql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
SAVE_ALL_INT
GET_CURRENT(%d0)
@@ -216,7 +222,7 @@ ENTRY(nmi_handler)
inthandler:
SAVE_ALL_INT
GET_CURRENT(%d0)
addql #1,irq_stat+8 | local_irq_count
addql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
| put exception # in d0
bfextu %sp@(PT_VECTOR){#4,#10},%d0
@@ -231,11 +237,11 @@ inthandler:
jbra 3f
1:
#endif
jbsr process_int| process the IRQ
jbsr process_int | process the IRQ
3: addql #8,%sp | pop parameters off stack
ret_from_interrupt:
subql #1,irq_stat+8 | local_irq_count
subql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
jeq 1f
2:
RESTORE_ALL
@@ -248,11 +254,8 @@ ret_from_interrupt:
jhi 2b
#endif
/* check if we need to do software interrupts */
movel irq_stat,%d0 | softirq_active
andl irq_stat+4,%d0 | softirq_mask
tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
jeq ret_from_exception
pea ret_from_exception
jra do_softirq
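The tstb tests in the FPSP/ISP stubs and the movel/lsrl/tstw/tstb sequence in the system_call and resume_userspace paths above all depend on the flag bytes introduced by this patch sharing one 32-bit word. A minimal C sketch of that trick, assuming the big-endian m68k layout (the struct is copied from the processor.h hunk further down; the demo program itself is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* struct task_work as added by this patch (see the processor.h hunk below) */
struct task_work {
	unsigned char sigpending;
	unsigned char notify_resume;
	char need_resched;
	unsigned char delayed_trace;
	unsigned char syscall_trace;
	unsigned char pad[3];
};

/* what "movel %curptr@(TASK_WORK),%d0" loads: the first four flag bytes */
static uint32_t work_word(const struct task_work *w)
{
	uint32_t v;

	memcpy(&v, w, sizeof(v));
	return v;
}

int main(void)
{
	struct task_work w = { 0 };

	w.need_resched = 1;
	/* nonzero word => take the syscall_exit_work / exit_work slow path */
	printf("work pending: %s\n", work_word(&w) ? "yes" : "no");
	/* on big-endian m68k, "lsrl #8,%d0" then drops delayed_trace and
	 * "tstb %d0" tests need_resched alone, as in exit_work above */
	return 0;
}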
@@ -1543,8 +1543,8 @@ L(cache_done):
/*
* Setup initial stack pointer
*/
lea init_task_union,%curptr
lea 0x2000(%curptr),%sp
lea init_task,%curptr
lea init_thread_union+THREAD_SIZE,%sp
putc 'K'
@@ -25,8 +25,11 @@ int main(void)
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
#error DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, work.sigpending));
#error DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, work.need_resched));
DEFINE(TASK_WORK, offsetof(struct task_struct, thread.work));
DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, thread.work.need_resched));
DEFINE(TASK_SYSCALL_TRACE, offsetof(struct task_struct, thread.work.syscall_trace));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, thread.work.sigpending));
DEFINE(TASK_NOTIFY_RESUME, offsetof(struct task_struct, thread.work.notify_resume));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
@@ -66,6 +69,12 @@ int main(void)
/* offsets into the kernel_stat struct */
DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
DEFINE(CPUSTAT_LOCAL_IRQ_COUNT, offsetof(irq_cpustat_t, __local_irq_count));
DEFINE(CPUSTAT_LOCAL_BH_COUNT, offsetof(irq_cpustat_t, __local_bh_count));
DEFINE(CPUSTAT_SYSCALL_COUNT, offsetof(irq_cpustat_t, __syscall_count));
/* offsets into the bi_record struct */
DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
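The CPUSTAT_* and TASK_* symbols defined above are what let the assembly replace magic byte offsets such as irq_stat+8 with named ones. DEFINE() is the usual asm-offsets trick; a sketch of the mechanism (the exact macro body in this tree may differ):

/* asm-offsets sketch: emit the constant into the generated assembly, where
 * a build script scrapes it into a header that entry.S can include */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))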
@@ -24,6 +24,7 @@
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -33,7 +34,7 @@
#include <asm/pgtable.h>
/*
* Initial task structure. Make this a per-architecture thing,
* Initial task/thread structure. Make this a per-architecture thing,
* because different architectures tend to have different
* alignment requirements and potentially different initial
* setup.
@@ -43,13 +44,33 @@ static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
union task_union init_task_union
__attribute__((section("init_task"), aligned(THREAD_SIZE)))
= { task: INIT_TASK(init_task_union.task) };
union thread_union init_thread_union
__attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
= { INIT_THREAD_INFO(init_task) };
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
asmlinkage void ret_from_fork(void);
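The definitions above split what used to be a single task/stack union: only a small thread_info now sits at the base of the kernel stack, pointing back at a separately defined task_struct. Roughly, assuming the generic 2.5-era thread_union shape (types come from the new header below):

/* sketch of the new layout; previously the whole task_struct lived here */
union thread_union {
	struct thread_info thread_info;	/* at the stack base, offset 0 */
	unsigned long stack[THREAD_SIZE / sizeof(long)];
};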
/*
* Return saved PC from a blocked thread
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
/* Check whether the thread is blocked in resume() */
if (sw->retpc > (unsigned long)scheduling_functions_start_here &&
sw->retpc < (unsigned long)scheduling_functions_end_here)
return ((unsigned long *)sw->a6)[1];
else
return sw->retpc;
}
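The ((unsigned long *)sw->a6)[1] lookup assumes the standard m68k "link %a6" frame: a6 points at the saved caller frame pointer, and the return address sits in the next long word. As a sketch (an assumption about the frame layout, not kernel code):

/* assumed m68k stack frame after "jsr" then "link %a6,#..." */
struct m68k_frame {
	unsigned long saved_a6;	/* *(unsigned long *)a6     */
	unsigned long retpc;	/* ((unsigned long *)a6)[1] */
};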
/*
* The idle loop on an m68k..
*/
@@ -133,23 +154,26 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
register long retval __asm__ ("d0");
register long clone_arg __asm__ ("d1") = flags | CLONE_VM;
retval = __NR_clone;
__asm__ __volatile__
("clrl %%d2\n\t"
"trap #0\n\t" /* Linux/m68k system call */
"tstl %0\n\t" /* child or parent */
"jne 1f\n\t" /* parent - jump */
"lea %%sp@(%c7),%6\n\t" /* reload current */
"movel %6@,%6\n\t"
"movel %3,%%sp@-\n\t" /* push argument */
"jsr %4@\n\t" /* call fn */
"movel %0,%%d1\n\t" /* pass exit value */
"movel %2,%0\n\t" /* exit */
"movel %2,%%d0\n\t" /* exit */
"trap #0\n"
"1:"
: "=d" (retval)
: "0" (__NR_clone), "i" (__NR_exit),
: "+d" (retval)
: "i" (__NR_clone), "i" (__NR_exit),
"r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
"i" (-THREAD_SIZE)
: "d0", "d2");
: "d2");
pid = retval;
}
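The constraint change here is the substantive fix: retval is both an input (the clone syscall number) and an output, so it must be a read-write "+d" operand, and d0 may no longer sit in the clobber list once it backs an operand. A standalone sketch of the corrected pattern (hypothetical helper, not the kernel's):

/* hypothetical m68k syscall stub showing the "+d" read-write constraint */
static inline long trap0(long nr)
{
	register long res __asm__ ("d0") = nr;	/* nr in, result out, both in d0 */

	__asm__ __volatile__ ("trap #0"
			      : "+d" (res)
			      : /* no separate inputs */
			      : "memory");
	return res;
}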
@@ -366,7 +390,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)p;
stack_page = (unsigned long)(p->thread_info);
fp = ((struct switch_stack *)p->thread.ksp)->a6;
do {
if (fp < stack_page+sizeof(struct task_struct) ||
@@ -98,12 +98,13 @@ void ptrace_disable(struct task_struct *child)
/* make sure the single step bit is not set. */
tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
put_reg(child, PT_SR, tmp);
child->thread.work.delayed_trace = 0;
child->thread.work.syscall_trace = 0;
}
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
unsigned long flags;
int ret;
lock_kernel();
@@ -243,14 +244,16 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
if (request == PTRACE_SYSCALL)
child->ptrace |= PT_TRACESYS;
else
child->ptrace &= ~PT_TRACESYS;
if (request == PTRACE_SYSCALL) {
child->thread.work.syscall_trace = ~0;
} else {
child->thread.work.syscall_trace = 0;
}
child->exit_code = data;
/* make sure the single step bit is not set. */
tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
put_reg(child, PT_SR, tmp);
child->thread.work.delayed_trace = 0;
wake_up_process(child);
ret = 0;
break;
@@ -271,6 +274,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
/* make sure the single step bit is not set. */
tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
put_reg(child, PT_SR, tmp);
child->thread.work.delayed_trace = 0;
wake_up_process(child);
break;
}
@@ -281,9 +285,10 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
child->ptrace &= ~PT_TRACESYS;
child->thread.work.syscall_trace = 0;
tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16);
put_reg(child, PT_SR, tmp);
child->thread.work.delayed_trace = 1;
child->exit_code = data;
/* give it a chance to run. */
@@ -354,7 +359,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
break;
}
out_tsk:
free_task_struct(child);
put_task_struct(child);
out:
unlock_kernel();
return ret;
@@ -362,8 +367,8 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
asmlinkage void syscall_trace(void)
{
if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
!= (PT_PTRACED|PT_TRACESYS))
if (!current->thread.work.delayed_trace &&
!current->thread.work.syscall_trace)
return;
current->exit_code = SIGTRAP;
current->state = TASK_STOPPED;
@@ -792,10 +792,10 @@ static void setup_frame (int sig, struct k_sigaction *ka,
regs->stkadj = fsize;
}
err |= __put_user((current->exec_domain
&& current->exec_domain->signal_invmap
err |= __put_user((current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current->exec_domain->signal_invmap[sig]
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig),
&frame->sig);
@@ -870,10 +870,10 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
regs->stkadj = fsize;
}
err |= __put_user((current->exec_domain
&& current->exec_domain->signal_invmap
err |= __put_user((current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current->exec_domain->signal_invmap[sig]
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig),
&frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
@@ -111,6 +111,7 @@ PT_DTRACE_BIT = 2
movel %sp,\reg
andw #-THREAD_SIZE,\reg
movel \reg,%curptr
movel %curptr@,%curptr
.endm
#else /* C source */
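The extra "movel %curptr@,%curptr" added to GET_CURRENT is the thread_info indirection: masking the stack pointer down to a THREAD_SIZE boundary yields the thread_info, whose first member is the task pointer. A C-level sketch of the assumed equivalence (names here are illustrative only):

struct task_struct;

/* thread_info's first member is the task pointer (see the new header below) */
struct thread_info_sketch {
	struct task_struct *task;	/* offset 0 */
};

#define THREAD_SIZE_SKETCH (2 * 4096)	/* 2*PAGE_SIZE on 4k-page machines */

static inline struct task_struct *get_current_sketch(unsigned long sp)
{
	struct thread_info_sketch *ti =
		(struct thread_info_sketch *)(sp & -THREAD_SIZE_SKETCH);

	return ti->task;	/* the added "movel %curptr@,%curptr" */
}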
@@ -59,10 +59,15 @@ extern inline void wrusp(unsigned long usp) {
#define EISA_bus 0
#define MCA_bus 0
/*
* if you change this structure, you must change the code and offsets
* in m68k/machasm.S
*/
struct task_work {
unsigned char sigpending;
unsigned char notify_resume; /* request for notification on
userspace execution resumption */
char need_resched;
unsigned char delayed_trace; /* single step a syscall */
unsigned char syscall_trace; /* count of syscall interceptors */
unsigned char pad[3];
};
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
@@ -76,11 +81,13 @@ struct thread_struct {
unsigned long fp[8*3];
unsigned long fpcntl[3]; /* fp control regs */
unsigned char fpstate[FPSTATESIZE]; /* floating point state */
struct task_work work;
};
#define INIT_THREAD { \
sizeof(init_stack) + (unsigned long) init_stack, 0, \
PS_S, __KERNEL_DS, \
ksp: sizeof(init_stack) + (unsigned long) init_stack, \
sr: PS_S, \
fs: __KERNEL_DS, \
}
/*
@@ -117,21 +124,7 @@ static inline void exit_thread(void)
{
}
/*
* Return saved PC of a blocked thread.
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
struct switch_stack *sw = (struct switch_stack *)t->ksp;
/* Check whether the thread is blocked in resume() */
if (sw->retpc > (unsigned long)scheduling_functions_start_here &&
sw->retpc < (unsigned long)scheduling_functions_end_here)
return ((unsigned long *)sw->a6)[1];
else
return sw->retpc;
}
extern unsigned long thread_saved_pc(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
@@ -144,17 +137,6 @@ unsigned long get_wchan(struct task_struct *p);
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define THREAD_SIZE (2*PAGE_SIZE)
/* Allocation and freeing of basic task resources. */
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define cpu_relax() do { } while (0)
#endif
@@ -7,7 +7,10 @@
#include <asm/segment.h>
#include <asm/entry.h>
#define prepare_to_switch() do { } while(0)
#define prepare_arch_schedule(prev) do { } while(0)
#define finish_arch_schedule(prev) do { } while(0)
#define prepare_arch_switch(rq) do { } while(0)
#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
/*
* switch_to(n) should switch tasks to task ptr, first checking that
@@ -32,19 +35,16 @@
* 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
*
* Changed 96/09/19 by Andreas Schwab
* pass prev in a0, next in a1, offset of tss in d1, and whether
* the mm structures are shared in d2 (to avoid atc flushing).
* pass prev in a0, next in a1
*/
asmlinkage void resume(void);
#define switch_to(prev,next,last) { \
#define switch_to(prev,next,last) do { \
register void *_prev __asm__ ("a0") = (prev); \
register void *_next __asm__ ("a1") = (next); \
register void *_last __asm__ ("d1"); \
__asm__ __volatile__("jbsr resume" \
: "=d" (_last) : "a" (_prev), "a" (_next) \
: "d0", /* "d1", */ "d2", "d3", "d4", "d5", "a0", "a1"); \
(last) = _last; \
}
: : "a" (_prev), "a" (_next) \
: "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
} while (0)
/* interrupt control.. */
#ifndef _ASM_M68K_THREAD_INFO_H
#define _ASM_M68K_THREAD_INFO_H
#include <asm/types.h>
#include <asm/processor.h>
#include <asm/page.h>
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
__u8 supervisor_stack[0];
};
#define PREEMPT_ACTIVE 0x4000000
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
}
/* THREAD_SIZE should be 8k, so handle differently for 4k and 8k machines */
#if PAGE_SHIFT == 13 /* 8k machines */
#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL,0))
#define free_thread_info(ti) free_pages((unsigned long)(ti),0)
#else /* otherwise assume 4k pages */
#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long)(ti),1)
#endif /* PAGE_SHIFT == 13 */
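Both branches allocate the same 8 KB (THREAD_SIZE); only the page order differs. An equivalent single formulation, as a sketch in the same kernel context:

/* sketch: 8 KB stacks either way; order = 13 - PAGE_SHIFT
 * (0 on 8k-page machines such as Sun-3, 1 on 4k-page ones) */
#define THREAD_ORDER (13 - PAGE_SHIFT)
#define alloc_thread_info() \
	((struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long)(ti), THREAD_ORDER)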
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
#define current_thread_info() (current->thread_info)
#define __HAVE_THREAD_FUNCTIONS
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_DELAYED_TRACE 1 /* single step a syscall */
#define TIF_NOTIFY_RESUME 2 /* resumption notification requested */
#define TIF_SIGPENDING 3 /* signal pending */
#define TIF_NEED_RESCHED 4 /* rescheduling necessary */
extern int thread_flag_fixme(void);
/*
* flag set/clear/test wrappers
* - pass TIF_xxxx constants to these functions
*/
#define __set_tsk_thread_flag(tsk, flag, val) ({ \
switch (flag) { \
case TIF_SIGPENDING: \
tsk->thread.work.sigpending = val; \
break; \
case TIF_NEED_RESCHED: \
tsk->thread.work.need_resched = val; \
break; \
default: \
thread_flag_fixme(); \
} \
})
#define __get_tsk_thread_flag(tsk, flag) ({ \
int ___res; \
switch (flag) { \
case TIF_SIGPENDING: \
___res = tsk->thread.work.sigpending; \
break; \
case TIF_NEED_RESCHED: \
___res = tsk->thread.work.need_resched; \
break; \
default: \
___res = thread_flag_fixme(); \
} \
___res; \
})
#define __get_set_tsk_thread_flag(tsk, flag, val) ({ \
int __res = __get_tsk_thread_flag(tsk, flag); \
__set_tsk_thread_flag(tsk, flag, val); \
__res; \
})
#define set_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, ~0)
#define clear_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, 0)
#define test_and_set_tsk_thread_flag(tsk, flag) __get_set_tsk_thread_flag(tsk, flag, ~0)
#define test_tsk_thread_flag(tsk, flag) __get_tsk_thread_flag(tsk, flag)
#define set_thread_flag(flag) set_tsk_thread_flag(current, flag)
#define clear_thread_flag(flag) clear_tsk_thread_flag(current, flag)
#define test_thread_flag(flag) test_tsk_thread_flag(current, flag)
#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
#endif /* _ASM_M68K_THREAD_INFO_H */
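A usage sketch of the wrappers above (hypothetical caller): generic code keeps the TIF_* interface, which on m68k turns into plain byte stores on thread.work, and any flag without a case in the switch fails to link through thread_flag_fixme(), the deliberate safety net.

/* hypothetical example: mark a task for rescheduling */
static inline void kick(struct task_struct *p)
{
	if (!test_tsk_thread_flag(p, TIF_NEED_RESCHED))
		set_tsk_thread_flag(p, TIF_NEED_RESCHED); /* byte store of ~0 */
	/* set_tsk_thread_flag(p, TIF_DELAYED_TRACE) would not link:
	 * no case for it, so thread_flag_fixme() gets referenced */
}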