Commit c15d3bea authored by Nick Piggin, committed by Linus Torvalds

[PATCH] sched: remove balance on clone

This removes the balance-on-clone capability altogether.  I told Andi we wouldn't
remove it yet, but provided it is in a single small patch, he mightn't get too
upset.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b4f14b64
include/linux/sched.h
@@ -574,11 +574,10 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define SD_BALANCE_NEWIDLE	1	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		2	/* Balance on exec */
-#define SD_BALANCE_CLONE	4	/* Balance on clone */
-#define SD_WAKE_IDLE		8	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		16	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		32	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	64	/* Domain members share cpu power */
+#define SD_WAKE_IDLE		4	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		8	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		16	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	32	/* Domain members share cpu power */
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
@@ -759,7 +758,7 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, unsigned long clone_flags));
+extern void FASTCALL(sched_fork(task_t * p));
 extern void FASTCALL(sched_exit(task_t * p));
 extern int in_group_p(gid_t);
...
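The SD_* values above are bit flags OR-ed into a scheduler domain's flags word, so dropping SD_BALANCE_CLONE simply shifts the remaining bits down one position. A minimal userspace sketch of how such flags compose and are tested; the flag values are copied from the post-patch header, and toy_domain is a simplified, hypothetical stand-in for struct sched_domain, not the kernel structure:

```c
#include <stdio.h>

/* Flag values as renumbered by this patch (SD_BALANCE_CLONE, value 4, is gone). */
#define SD_BALANCE_NEWIDLE	1	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		2	/* Balance on exec */
#define SD_WAKE_IDLE		4	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		8	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		16	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	32	/* Domain members share cpu power */

/* Hypothetical, heavily simplified stand-in for struct sched_domain. */
struct toy_domain {
	unsigned long flags;
};

int main(void)
{
	struct toy_domain sd = { .flags = SD_BALANCE_EXEC | SD_WAKE_AFFINE | SD_WAKE_BALANCE };

	/* Flag tests are plain bitwise ANDs against the domain's flags word. */
	printf("balance on exec: %s\n", (sd.flags & SD_BALANCE_EXEC) ? "yes" : "no");
	printf("wake to idle:    %s\n", (sd.flags & SD_WAKE_IDLE)    ? "yes" : "no");
	printf("wake affine:     %s\n", (sd.flags & SD_WAKE_AFFINE)  ? "yes" : "no");
	return 0;
}
```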
kernel/fork.c
@@ -1030,7 +1030,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
 	p->pdeath_signal = 0;
 	/* Perform scheduler related setup */
-	sched_fork(p, clone_flags);
+	sched_fork(p);
 	/*
 	 * Ok, make it visible to the rest of the system.
...
kernel/sched.c
@@ -874,30 +874,10 @@ static int find_idlest_cpu(struct task_struct *p, int this_cpu,
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current. The cpu hotplug lock is held.
  */
-void fastcall sched_fork(task_t *p, unsigned long clone_flags)
+void fastcall sched_fork(task_t *p)
 {
-	int cpu;
-#ifdef CONFIG_SMP
-	struct sched_domain *tmp, *sd = NULL;
-	preempt_disable();
-	cpu = smp_processor_id();
-	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) {
-		/*
-		 * New thread that is not a vfork.
-		 * Find the largest domain that this CPU is part of that
-		 * is willing to balance on clone:
-		 */
-		for_each_domain(cpu, tmp)
-			if (tmp->flags & SD_BALANCE_CLONE)
-				sd = tmp;
-		if (sd)
-			cpu = find_idlest_cpu(p, cpu, sd);
-	}
-	preempt_enable();
-#else
-	cpu = smp_processor_id();
-#endif
+	int cpu = smp_processor_id();
 	/*
 	 * The task hasn't been attached yet, so cpus_allowed mask cannot
 	 * change. The cpus_allowed mask of the parent may have changed
...
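The block deleted above walked every scheduler domain containing the forking CPU, kept the widest one with SD_BALANCE_CLONE set, and let find_idlest_cpu() pick the child's starting CPU inside it; with the patch applied, the child simply starts on the CPU that forked it. Below is a self-contained sketch of that "remember the last domain that has the flag" walk, assuming a toy two-level domain chain and a dummy idlest-CPU helper in place of the kernel's for_each_domain() and find_idlest_cpu():

```c
#include <stdio.h>

#define SD_BALANCE_CLONE	4	/* the flag this patch removes */

/* Hypothetical, simplified stand-in for struct sched_domain and its parent chain. */
struct toy_domain {
	struct toy_domain *parent;	/* next wider domain, NULL at the top */
	unsigned long flags;
	const char *name;
};

/* Dummy replacement for find_idlest_cpu(): pretend CPU 3 is the least loaded. */
static int toy_find_idlest_cpu(const struct toy_domain *sd, int this_cpu)
{
	(void)sd;
	(void)this_cpu;
	return 3;
}

int main(void)
{
	/* Toy hierarchy: an SMT-level domain nested inside a physical-package domain. */
	struct toy_domain phys = { NULL,  SD_BALANCE_CLONE, "phys" };
	struct toy_domain smt  = { &phys, 0,                "smt"  };
	struct toy_domain *sd = NULL, *tmp;
	int cpu = 0;			/* the CPU running clone() */

	/* Walk outward, keeping the widest domain willing to balance on clone. */
	for (tmp = &smt; tmp; tmp = tmp->parent)
		if (tmp->flags & SD_BALANCE_CLONE)
			sd = tmp;

	if (sd)
		cpu = toy_find_idlest_cpu(sd, cpu);

	printf("child would start on CPU %d (via domain %s)\n",
	       cpu, sd ? sd->name : "none");
	return 0;
}
```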