Commit 21d3dc9c authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] sched: consolidate init_idle() and fork_by_hand()

init_idle() and fork_by_hand() can be combined into a single function,
fork_idle(), which calls init_idle() on behalf of the caller.
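
The caller-side change then collapses to the same pattern in every
architecture; roughly (an illustrative sketch distilled from the hunks
below, using the i386 variable names):

	/* before: each architecture open-coded the idle-task setup */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	init_idle(idle, cpu);		/* make this the idle thread */
	unhash_process(idle);		/* remove it from the pidhash */

	/* after: fork_idle() performs all three steps on the caller's behalf */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);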
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 206f4a83
@@ -411,15 +411,6 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
 	return 0;
 }
 
-static struct task_struct * __init
-fork_by_hand(void)
-{
-	/* Don't care about the contents of regs since we'll never
-	   reschedule the forked task.  */
-	struct pt_regs regs;
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
 /*
  * Bring one cpu online.
  */
@@ -435,13 +426,10 @@ smp_boot_one_cpu(int cpuid)
 	   the other task-y sort of data structures set up like we
 	   wish.  We can't use kernel_thread since we must avoid
 	   rescheduling the child.  */
-	idle = fork_by_hand();
+	idle = fork_idle(cpuid);
 	if (IS_ERR(idle))
 		panic("failed fork for CPU %d", cpuid);
-	init_idle(idle, cpuid);
-
-	unhash_process(idle);
 
 	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
 	      cpuid, idle->state, idle->flags));
...
@@ -496,16 +496,6 @@ extern struct {
 	unsigned short ss;
 } stack_start;
 
-static struct task_struct * __init fork_by_hand(void)
-{
-	struct pt_regs regs;
-	/*
-	 * don't care about the eip and regs settings since
-	 * we'll never reschedule the forked task.
-	 */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
 #ifdef CONFIG_NUMA
 
 /* which logical CPUs are on which nodes */
@@ -801,18 +791,10 @@ static int __init do_boot_cpu(int apicid)
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
 	 */
-	idle = fork_by_hand();
+	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
 		panic("failed fork for CPU %d", cpu);
-
-	/* Make this the idle thread */
-	init_idle(idle, cpu);
-
 	idle->thread.eip = (unsigned long) start_secondary;
-	/* Remove it from the pidhash */
-	unhash_process(idle);
-
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
...
@@ -523,15 +523,6 @@ start_secondary(void *unused)
 	return cpu_idle();
 }
 
-static struct task_struct * __init
-fork_by_hand(void)
-{
-	struct pt_regs regs;
-	/* don't care about the eip and regs settings since we'll
-	 * never reschedule the forked task. */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
 /* Routine to kick start the given CPU and wait for it to report ready
  * (or timeout in startup).  When this routine returns, the requested
@@ -587,17 +578,10 @@ do_boot_cpu(__u8 cpu)
 	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
 
 	cpucount++;
-	idle = fork_by_hand();
+	idle = fork_idle(cpu);
 	if(IS_ERR(idle))
 		panic("failed fork for CPU%d", cpu);
-
-	/* Make this the idle thread */
-	init_idle(idle, cpu);
-
 	idle->thread.eip = (unsigned long) start_secondary;
-	/* Remove it from the pidhash */
-	unhash_process(idle);
-
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
...
@@ -356,19 +356,10 @@ start_secondary (void *unused)
 	return cpu_idle();
 }
 
-static struct task_struct * __devinit
-fork_by_hand (void)
-{
-	/*
-	 * Don't care about the IP and regs settings since we'll never reschedule the
-	 * forked task.
-	 */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
-}
-
 struct create_idle {
 	struct task_struct *idle;
 	struct completion done;
+	int cpu;
 };
 
 void
@@ -376,7 +367,7 @@ do_fork_idle(void *_c_idle)
 {
 	struct create_idle *c_idle = _c_idle;
 
-	c_idle->idle = fork_by_hand();
+	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
 }
@@ -384,10 +375,11 @@ static int __devinit
 do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
-	struct create_idle c_idle;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER(c_idle.done),
+	};
 	DECLARE_WORK(work, do_fork_idle, &c_idle);
-	init_completion(&c_idle.done);
 
 	/*
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
@@ -400,13 +392,6 @@ do_boot_cpu (int sapicid, int cpu)
 	if (IS_ERR(c_idle.idle))
 		panic("failed fork for CPU %d", cpu);
-
-	/* Make this the idle thread */
-	init_idle(c_idle.idle, cpu);
-
-	/* Remove it from the pidhash */
-	unhash_process(c_idle.idle);
-
 	task_for_booting_cpu = c_idle.idle;
 
 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
...
@@ -254,16 +254,6 @@ void __devinit smp_prepare_boot_cpu(void)
 	cpu_set(0, cpu_callin_map);
 }
 
-static struct task_struct * __init fork_by_hand(void)
-{
-	struct pt_regs regs;
-	/*
-	 * don't care about the eip and regs settings since
-	 * we'll never reschedule the forked task.
-	 */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
 /*
  * Startup the CPU with this logical number
  */
@@ -275,16 +265,10 @@ static int __init do_boot_cpu(int cpu)
 	 * The following code is purely to make sure
 	 * Linux can schedule processes on this slave.
 	 */
-	idle = fork_by_hand();
+	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
 		panic("failed fork for CPU %d\n", cpu);
-
-	/* Make this the idle thread */
-	init_idle(idle, cpu);
-
-	/* Remove it from the pidhash */
-	unhash_process(idle);
-
 	prom_boot_secondary(cpu, idle);
 
 	/* XXXKW timeout */
...
@@ -485,24 +485,6 @@ void __init smp_callin(void)
 	panic("smp_callin() AAAAaaaaahhhh....\n");
 }
 
-/*
- * Create the idle task for a new Slave CPU.  DO NOT use kernel_thread()
- * because that could end up calling schedule(). If it did, the new idle
- * task could get scheduled before we had a chance to remove it from the
- * run-queue...
- */
-static struct task_struct *fork_by_hand(void)
-{
-	struct pt_regs regs;
-
-	/*
-	 * don't care about the regs settings since
-	 * we'll never reschedule the forked task.
-	 */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
 /*
  * Bring one cpu online.
  */
@@ -521,12 +503,10 @@ int __init smp_boot_one_cpu(int cpuid)
 	 * Sheesh . . .
 	 */
-	idle = fork_by_hand();
+	idle = fork_idle(cpuid);
 	if (IS_ERR(idle))
 		panic("SMP: fork failed for CPU:%d", cpuid);
-	init_idle(idle, cpuid);
-	unhash_process(idle);
 
 	idle->thread_info->cpu = cpuid;
...
@@ -364,20 +364,15 @@ int __devinit start_secondary(void *unused)
 
 int __cpu_up(unsigned int cpu)
 {
-	struct pt_regs regs;
 	struct task_struct *p;
 	char buf[32];
 	int c;
 
 	/* create a process for the processor */
-	/* only regs.msr is actually used, and 0 is OK for it */
-	memset(&regs, 0, sizeof(struct pt_regs));
-	p = copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+	p = fork_idle(cpu);
 	if (IS_ERR(p))
 		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	init_idle(p, cpu);
-	unhash_process(p);
 
 	secondary_ti = p->thread_info;
 	p->thread_info->cpu = cpu;
...
@@ -801,20 +801,12 @@ static void __devinit smp_store_cpu_info(int id)
 
 static void __init smp_create_idle(unsigned int cpu)
 {
-	struct pt_regs regs;
 	struct task_struct *p;
 
 	/* create a process for the processor */
-	/* only regs.msr is actually used, and 0 is OK for it */
-	memset(&regs, 0, sizeof(struct pt_regs));
-	p = copy_process(CLONE_VM | CLONE_IDLETASK,
-			 0, &regs, 0, NULL, NULL);
+	p = fork_idle(cpu);
 	if (IS_ERR(p))
 		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	init_idle(p, cpu);
-	unhash_process(p);
 
 	paca[cpu].__current = p;
 	current_set[cpu] = p->thread_info;
 }
...
@@ -562,24 +562,15 @@ int __devinit start_secondary(void *cpuvoid)
 
 static void __init smp_create_idle(unsigned int cpu)
 {
-	struct pt_regs regs;
 	struct task_struct *p;
 
 	/*
 	 *  don't care about the psw and regs settings since we'll never
 	 *  reschedule the forked task.
 	 */
-	memset(&regs, 0, sizeof(struct pt_regs));
-	p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+	p = fork_idle(cpu);
 	if (IS_ERR(p))
 		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-
-	/* Make this the idle thread */
-	init_idle(p, cpu);
-
-	/* Remove it from the pidhash */
-	unhash_process(p);
-
 	current_set[cpu] = p;
 }
...
@@ -98,17 +98,12 @@ void __devinit smp_prepare_boot_cpu(void)
 int __cpu_up(unsigned int cpu)
 {
 	struct task_struct *tsk;
-	struct pt_regs regs;
 
-	memset(&regs, 0, sizeof(struct pt_regs));
-	tsk = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, 0, 0);
+	tsk = fork_idle(cpu);
 	if (IS_ERR(tsk))
 		panic("Failed forking idle task for cpu %d\n", cpu);
-
-	init_idle(tsk, cpu);
-	unhash_process(tsk);
 
 	tsk->thread_info->cpu = cpu;
 
 	cpu_set(cpu, cpu_online_map);
...
@@ -201,18 +201,9 @@ void __init smp4d_boot_cpus(void)
 			int no;
 
 			/* Cook up an idler for this guy. */
-			kernel_thread(start_secondary, NULL, CLONE_IDLETASK);
+			p = fork_idle(i);
 			cpucount++;
-			p = prev_task(&init_task);
-			init_idle(p, i);
 			current_set[i] = p->thread_info;
-			unhash_process(p);
-
 			for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
 				     && mid != i; no++) ;
...
@@ -303,14 +303,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 	struct task_struct *p;
 	int timeout, ret, cpu_node;
 
-	kernel_thread(NULL, NULL, CLONE_IDLETASK);
-
-	p = prev_task(&init_task);
-
-	init_idle(p, cpu);
-	unhash_process(p);
+	p = fork_idle(cpu);
 
 	callin_flag = 0;
 	cpu_new_thread = p->thread_info;
 	cpu_set(cpu, cpu_callout_map);
...
@@ -392,16 +392,6 @@ void __init start_secondary(void)
 extern volatile unsigned long init_rsp;
 extern void (*initial_code)(void);
 
-static struct task_struct * __init fork_by_hand(void)
-{
-	struct pt_regs regs;
-	/*
-	 * don't care about the eip and regs settings since
-	 * we'll never reschedule the forked task.
-	 */
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
-}
-
 #if APIC_DEBUG
 static inline void inquire_remote_apic(int apicid)
 {
@@ -575,17 +565,11 @@ static void __init do_boot_cpu (int apicid)
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
 	 */
-	idle = fork_by_hand();
+	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
 		panic("failed fork for CPU %d", cpu);
 	x86_cpu_to_apicid[cpu] = apicid;
-
-	/* Make this the idle thread */
-	init_idle(idle,cpu);
-
-	/* Remove it from the pidhash */
-	unhash_process(idle);
-
 	cpu_pda[cpu].pcurrent = idle;
 
 	start_rip = setup_trampoline();
...
@@ -793,7 +793,7 @@ extern task_t *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-extern struct task_struct * copy_process(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
+task_t *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
...
@@ -471,15 +471,6 @@ asmlinkage void __init start_kernel(void)
 	 * time - but meanwhile we still have a functioning scheduler.
 	 */
 	sched_init();
-
-	/*
-	 * Make us the idle thread. Technically, schedule() should not be
-	 * called from this thread, however somewhere below it might be,
-	 * but because we are the idle thread, we just pick up running again
-	 * when this runqueue becomes "idle".
-	 */
-	init_idle(current, smp_processor_id());
-
 	build_all_zonelists();
 	page_alloc_init();
 	printk("Kernel command line: %s\n", saved_command_line);
...
@@ -865,7 +865,7 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-struct task_struct *copy_process(unsigned long clone_flags,
+static task_t *copy_process(unsigned long clone_flags,
 				 unsigned long stack_start,
 				 struct pt_regs *regs,
 				 unsigned long stack_size,
@@ -1153,6 +1153,20 @@ struct task_struct *copy_process(unsigned long clone_flags,
 	goto fork_out;
 }
 
+task_t * __init fork_idle(int cpu)
+{
+	task_t *task;
+	struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(struct pt_regs));
+	task = copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+	if (IS_ERR(task))
+		return task;
+	init_idle(task, cpu);
+	unhash_process(task);
+	return task;
+}
+
 static inline int fork_traceflag (unsigned clone_flags)
 {
 	if (clone_flags & (CLONE_UNTRACED | CLONE_IDLETASK))
...
@@ -4580,6 +4580,14 @@ void __init sched_init(void)
 	 */
 	atomic_inc(&init_mm.mm_count);
 	enter_lazy_tlb(&init_mm, current);
+
+	/*
+	 * Make us the idle thread. Technically, schedule() should not be
+	 * called from this thread, however somewhere below it might be,
+	 * but because we are the idle thread, we just pick up running again
+	 * when this runqueue becomes "idle".
+	 */
+	init_idle(current, smp_processor_id());
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
...