Commit 5a983302 authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents 3627be6d e58d8e30
@@ -173,9 +173,10 @@ ENTRY(lcall27)
 ENTRY(ret_from_fork)
-# NOTE: this function takes a parameter but it's unused on x86.
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
+	popl %eax
 	jmp syscall_exit
 
 /*
...
@@ -423,8 +423,12 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
  * so the performance issues may eventually be a valid point.
  * More important, however, is the fact that this allows us much
  * more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
  */
-void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
 				 *next = &next_p->thread;
@@ -495,6 +499,7 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 		tss->bitmap = INVALID_IO_BITMAP_OFFSET;
 	}
+	return prev_p;
 }
 
 asmlinkage int sys_fork(struct pt_regs regs)
...
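The new comment above is the crux of the change: __switch_to() now returns, in %eax, the task that was actually running before the switch. That is also why the entry.S hunk brackets the call to schedule_tail with pushl %eax/popl %eax: a freshly forked child first wakes up in ret_from_fork with that return value still live in %eax, the push passes it to schedule_tail() as the argument the deleted comment called unused, and the pop simply removes it from the stack again. A minimal sketch of the child-side consumer, on the assumption (its body is not part of this diff) that schedule_tail() simply finishes the switch:

struct task_struct;
void finish_task_switch(struct task_struct *prev);	/* see the kernel/sched.c hunk below */

/* Sketch only: the real schedule_tail() lives in the scheduler, not here. */
void schedule_tail(struct task_struct *prev)
{
	/* The child completes prev's bookkeeping exactly like the normal
	 * schedule() path does, including putting a dead prev's last
	 * reference (see the finish_task_switch() hunk below). */
	finish_task_switch(prev);
}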
@@ -9,26 +9,24 @@
 #ifdef __KERNEL__
 
 struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
 #define switch_to(prev,next,last) do {					\
+	unsigned long esi,edi;						\
 	asm volatile("pushfl\n\t"					\
-		     "pushl %%esi\n\t"					\
-		     "pushl %%edi\n\t"					\
 		     "pushl %%ebp\n\t"					\
 		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %2,%%esp\n\t"	/* restore ESP */	\
+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %3\n\t"		/* restore EIP */	\
+		     "pushl %6\n\t"		/* restore EIP */	\
 		     "jmp __switch_to\n"				\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"					\
-		     "popl %%edi\n\t"					\
-		     "popl %%esi\n\t"					\
-		     "popfl\n\t"					\
-		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip)	\
+		     "popfl"						\
+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
+		      "=a" (last),"=S" (esi),"=D" (edi)			\
 		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-		      "a" (prev), "d" (next));				\
+		      "2" (prev), "d" (next));				\
 } while (0)
 
 #define _set_base(addr,base) do { unsigned long __pr; \
...
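The rewritten switch_to() macro is where the return value is picked up. There are now five output operands (%0..%4), so the inputs shift to %5 and up, which explains the renumbering of the ESP/EIP operands from %2/%3 to %5/%6. The dummy "=S" (esi) and "=D" (edi) outputs replace the explicit pushl/popl of %esi and %edi, letting the compiler save those registers only when it actually needs them. The interesting pair is "=a" (last) together with the matching constraint "2" (prev): prev goes in through %eax, __switch_to() returns its result in %eax, and whatever comes back lands in last. A small standalone program (plain GCC inline asm on x86, not kernel code) showing the same matching-constraint trick:

#include <stdio.h>

/* The empty asm body stands in for the jmp to __switch_to(): the "0"
 * matching constraint forces "in" into the same register as output 0
 * ("=a", i.e. %eax), and whatever sits in %eax afterwards comes out. */
static long through_eax(long in)
{
	long out;
	asm volatile("" : "=a" (out) : "0" (in));
	return out;
}

int main(void)
{
	printf("%ld\n", through_eax(42));	/* prints 42 */
	return 0;
}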
@@ -62,6 +62,7 @@
 {									\
 	.state		= 0,						\
 	.thread_info	= &init_thread_info,				\
+	.usage		= ATOMIC_INIT(2),				\
 	.flags		= 0,						\
 	.lock_depth	= -1,						\
 	.prio		= MAX_PRIO-20,					\
...
@@ -74,6 +74,9 @@ int nr_processes(void)
 void __put_task_struct(struct task_struct *tsk)
 {
+	WARN_ON(!(tsk->state & (TASK_DEAD | TASK_ZOMBIE)));
+	WARN_ON(atomic_read(&tsk->usage));
+
 	if (tsk != current) {
 		free_thread_info(tsk->thread_info);
 		kmem_cache_free(task_struct_cachep,tsk);
@@ -217,7 +220,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	*tsk = *orig;
 	tsk->thread_info = ti;
 	ti->task = tsk;
-	atomic_set(&tsk->usage,1);
+
+	/* One for us, one for whoever does the "release_task()" (usually parent) */
+	atomic_set(&tsk->usage,2);
 	return tsk;
 }
...
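The atomic_set(&tsk->usage,2) here, and the matching ATOMIC_INIT(2) in the init_task hunk above, set up a two-reference lifetime for every task: one reference for whoever reaps it (release_task(), usually the parent), and one that keeps the task structure and its stack alive until the scheduler has switched away from it for the last time. A userspace model of that lifetime, with hypothetical names (single-threaded, so a plain int stands in for atomic_t):

#include <stdio.h>
#include <stdlib.h>

struct task {
	int usage;			/* stand-in for the kernel's atomic_t */
};

static struct task *dup_task(void)
{
	struct task *t = malloc(sizeof(*t));
	/* One for us, one for whoever does the "release_task()" */
	t->usage = 2;
	return t;
}

static void put_task(struct task *t, const char *who)
{
	if (--t->usage == 0) {
		printf("%s dropped the final reference: freeing task\n", who);
		free(t);
	} else {
		printf("%s dropped a reference, %d left\n", who, t->usage);
	}
}

int main(void)
{
	struct task *t = dup_task();
	put_task(t, "release_task (parent)");	/* task may still be running here */
	put_task(t, "finish_task_switch");	/* last switch away from the dead task */
	return 0;
}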
@@ -581,6 +581,8 @@ static inline void finish_task_switch(task_t *prev)
 	finish_arch_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
+	if (prev->state & (TASK_DEAD | TASK_ZOMBIE))
+		put_task_struct(prev);
 }
 
 /**
@@ -1185,7 +1187,7 @@ asmlinkage void schedule(void)
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (likely(current->state != TASK_ZOMBIE)) {
+	if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
 		if (unlikely(in_atomic())) {
 			printk(KERN_ERR "bad: scheduling while atomic!\n");
 			dump_stack();
...
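finish_task_switch() is where the scheduler drops its own reference: only once we are safely running on the new task's stack can a prev marked TASK_DEAD or TASK_ZOMBIE be put and its stack freed. The schedule() hunk widens the "scheduling while atomic" exemption to a mask test for the same reason, since TASK_DEAD now also names a task scheduling away for good. The consumer on the schedule() side, sketched under the assumption that context_switch() in this tree wraps switch_to() and hands back its "last" output (that code is not part of this diff, and switch_and_finish is a hypothetical name):

struct task_struct;
struct runqueue;

/* assumed shape: does switch_to(prev, next, prev) internally, returns prev */
struct task_struct *context_switch(struct runqueue *rq,
				   struct task_struct *prev,
				   struct task_struct *next);
void finish_task_switch(struct task_struct *prev);

/* Sketch of the tail of schedule(): prev is re-read from switch_to()'s
 * "last" output, so the task being put really is the one we came from,
 * even if our stack copy of prev went stale while we were switched out. */
void switch_and_finish(struct runqueue *rq,
		       struct task_struct *prev,
		       struct task_struct *next)
{
	prev = context_switch(rq, prev, next);
	finish_task_switch(prev);	/* may drop a dead prev's final reference */
}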