Commit 2df40901 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] CPU scheduler cleanup

From: Ingo Molnar <mingo@elte.hu>

- move scheduling-state initialization from copy_process() to
  sched_fork() (Nick Piggin)
parent ea9c300d
......@@ -579,6 +579,7 @@ extern int FASTCALL(wake_up_process(struct task_struct * tsk));
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
extern void FASTCALL(sched_fork(task_t * p));
extern void FASTCALL(sched_exit(task_t * p));
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
......
......@@ -910,23 +910,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
#ifdef CONFIG_PREEMPT
/*
* schedule_tail drops this_rq()->lock so we compensate with a count
* of 1. Also, we want to start with kernel preemption disabled.
*/
p->thread_info->preempt_count = 1;
#endif
p->did_exec = 0;
/*
* We mark the process as running here, but have not actually
* inserted it onto the runqueue yet. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
copy_flags(clone_flags, p);
if (clone_flags & CLONE_IDLETASK)
p->pid = 0;
......@@ -942,15 +926,12 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->proc_dentry = NULL;
INIT_LIST_HEAD(&p->run_list);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
INIT_LIST_HEAD(&p->posix_timers);
init_waitqueue_head(&p->wait_chldexit);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
spin_lock_init(&p->switch_lock);
spin_lock_init(&p->proc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
......@@ -965,7 +946,6 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->tty_old_pgrp = 0;
p->utime = p->stime = 0;
p->cutime = p->cstime = 0;
p->array = NULL;
p->lock_depth = -1; /* -1 = no lock */
p->start_time = get_jiffies_64();
p->security = NULL;
......@@ -1014,38 +994,12 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
/* Perform scheduler related setup */
sched_fork(p);
/*
* Share the timeslice between parent and child, thus the
* total amount of pending timeslices in the system doesn't change,
* resulting in more scheduling fairness.
*/
local_irq_disable();
p->time_slice = (current->time_slice + 1) >> 1;
/*
* The remainder of the first timeslice might be recovered by
* the parent if the child exits early enough.
*/
p->first_time_slice = 1;
current->time_slice >>= 1;
p->timestamp = sched_clock();
if (!current->time_slice) {
/*
* This case is rare, it happens when the parent has only
* a single jiffy left from its timeslice. Taking the
* runqueue lock is not a problem.
*/
current->time_slice = 1;
preempt_disable();
scheduler_tick(0, 0);
local_irq_enable();
preempt_enable();
} else
local_irq_enable();
/*
* Ok, add it to the run-queues and make it
* visible to the rest of the system.
*
* Let it rip!
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->tgid = p->pid;
p->group_leader = p;
......
......@@ -673,6 +673,60 @@ int wake_up_state(task_t *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * Consolidates all scheduler-state initialization that used to live
 * inline in copy_process(): runqueue-related fields, the initial
 * preempt count, and the parent/child timeslice split.  The child is
 * NOT placed on a runqueue here; that happens later via
 * wake_up_forked_process().
 */
void sched_fork(task_t *p)
{
/*
 * We mark the process as running here, but have not actually
 * inserted it onto the runqueue yet. This guarantees that
 * nobody will actually run it, and a signal or other external
 * event cannot wake it up and insert it on the runqueue either.
 */
p->state = TASK_RUNNING;
/* Not on any runqueue yet: empty run_list, no priority array. */
INIT_LIST_HEAD(&p->run_list);
p->array = NULL;
spin_lock_init(&p->switch_lock);
#ifdef CONFIG_PREEMPT
/*
 * During context-switch we hold precisely one spinlock, which
 * schedule_tail drops. (in the common case it's this_rq()->lock,
 * but it also can be p->switch_lock.) So we compensate with a count
 * of 1. Also, we want to start with kernel preemption disabled.
 */
p->thread_info->preempt_count = 1;
#endif
/*
 * Share the timeslice between parent and child, thus the
 * total amount of pending timeslices in the system doesn't change,
 * resulting in more scheduling fairness.
 */
local_irq_disable();
/* Child gets the (rounded-up) half of the parent's remaining slice. */
p->time_slice = (current->time_slice + 1) >> 1;
/*
 * The remainder of the first timeslice might be recovered by
 * the parent if the child exits early enough.
 */
p->first_time_slice = 1;
current->time_slice >>= 1;
p->timestamp = sched_clock();
if (!current->time_slice) {
/*
 * This case is rare, it happens when the parent has only
 * a single jiffy left from its timeslice. Taking the
 * runqueue lock is not a problem.
 */
current->time_slice = 1;
/*
 * scheduler_tick() expires the parent's now-zero timeslice and
 * requeues it; preemption stays disabled across the call so any
 * resulting resched happens at preempt_enable(), with irqs
 * re-enabled first.
 */
preempt_disable();
scheduler_tick(0, 0);
local_irq_enable();
preempt_enable();
} else
local_irq_enable();
}
/*
* wake_up_forked_process - wake up a freshly forked process.
*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment