Commit 6a991f77 authored by Andrew Theurer's avatar Andrew Theurer Committed by Linus Torvalds

[PATCH] sched: can_migrate exception for idle cpus

Fix can_migrate to allow aggressive stealing for idle cpus.  This -was- in
mainline, but I believe the sched_domains rework removed it.  IMO, it is a
no-brainer to allow an idle cpu (with all that cache going to waste) to
steal a task.  The one enhancement I have made is to make sure the whole
cpu — including all SMT siblings — is idle.

Signed-off-by: <habanero@us.ibm.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent f16c759b
...@@ -455,6 +455,22 @@ static inline void rq_unlock(runqueue_t *rq) ...@@ -455,6 +455,22 @@ static inline void rq_unlock(runqueue_t *rq)
spin_unlock_irq(&rq->lock); spin_unlock_irq(&rq->lock);
} }
#ifdef CONFIG_SCHED_SMT
/*
 * Return 1 only when @cpu and every one of its SMT siblings are idle,
 * i.e. the whole physical package is doing nothing.  Used to decide
 * whether aggressive task stealing is worthwhile.
 */
static int cpu_and_siblings_are_idle(int cpu)
{
	int sib;

	for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
		if (!idle_cpu(sib))
			return 0;
	}
	return 1;
}
#else
/* Without SMT there are no siblings: the cpu's own idleness decides. */
#define cpu_and_siblings_are_idle(A) idle_cpu(A)
#endif
#ifdef CONFIG_SCHEDSTATS #ifdef CONFIG_SCHEDSTATS
/* /*
* Called when a process is dequeued from the active array and given * Called when a process is dequeued from the active array and given
...@@ -1668,13 +1684,18 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, ...@@ -1668,13 +1684,18 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
if (!cpu_isset(this_cpu, p->cpus_allowed)) if (!cpu_isset(this_cpu, p->cpus_allowed))
return 0; return 0;
/* Aggressive migration if we've failed balancing */ /*
if (idle == NEWLY_IDLE || * Aggressive migration if:
sd->nr_balance_failed < sd->cache_nice_tries) { * 1) the [whole] cpu is idle, or
if (task_hot(p, rq->timestamp_last_tick, sd)) * 2) too many balance attempts have failed.
return 0; */
}
if (cpu_and_siblings_are_idle(this_cpu) || \
sd->nr_balance_failed > sd->cache_nice_tries)
return 1;
if (task_hot(p, rq->timestamp_last_tick, sd))
return 0;
return 1; return 1;
} }
...@@ -2089,23 +2110,6 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq) ...@@ -2089,23 +2110,6 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
} }
} }
/*
 * NOTE(review): this is the deletion side of the diff — the same
 * cpu_and_siblings_are_idle() definition is moved earlier in the file
 * (above can_migrate_task(), its new caller) by this patch.
 */
#ifdef CONFIG_SCHED_SMT
static int cpu_and_siblings_are_idle(int cpu)
{
int sib;
for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
if (idle_cpu(sib))
continue;
return 0;
}
return 1;
}
#else
#define cpu_and_siblings_are_idle(A) idle_cpu(A)
#endif
/* /*
* active_load_balance is run by migration threads. It pushes running tasks * active_load_balance is run by migration threads. It pushes running tasks
* off the busiest CPU onto idle CPUs. It requires at least 1 task to be * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment