Commit a15b12ac authored by Kirill Tkhai, committed by Ingo Molnar

sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running

If a task is queued but not running on its rq, we can simply migrate
it without the migration thread and without a context switch.
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1410519814.3569.7.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1ba93d42
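For context (not part of the commit itself): the path changed below is what userspace reaches through sched_setaffinity(2), which lands in set_cpus_allowed_ptr(). A minimal sketch that exercises it; the target-PID handling and the choice of CPU 1 are illustrative only:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	cpu_set_t set;
	/* Re-pin the task given as argv[1]; 0 means the calling task. */
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : 0;

	CPU_ZERO(&set);
	CPU_SET(1, &set);	/* allow CPU 1 only (illustrative) */

	/*
	 * This syscall reaches set_cpus_allowed_ptr(). With the patch
	 * below, a task that is queued but not running is migrated
	 * directly instead of via the stopper (migration) thread.
	 */
	if (sched_setaffinity(pid, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}

Before this change, any queued task took the stop_one_cpu() path; afterwards only a task that is actually running (or mid-wakeup) needs the stopper's help.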
@@ -4629,6 +4629,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+	struct rq *rq = task_rq(p);
+
+	lockdep_assert_held(&rq->lock);
+
+	dequeue_task(rq, p, 0);
+	p->on_rq = TASK_ON_RQ_MIGRATING;
+	set_task_cpu(p, new_cpu);
+	raw_spin_unlock(&rq->lock);
+
+	rq = cpu_rq(new_cpu);
+
+	raw_spin_lock(&rq->lock);
+	BUG_ON(task_cpu(p) != new_cpu);
+	p->on_rq = TASK_ON_RQ_QUEUED;
+	enqueue_task(rq, p, 0);
+	check_preempt_curr(rq, p, 0);
+
+	return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
 	if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4685,14 +4712,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
+	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
-	}
+	} else if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 
 out:
 	task_rq_unlock(rq, p, &flags);
@@ -4735,19 +4763,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (task_on_rq_queued(p)) {
-		dequeue_task(rq, p, 0);
-		p->on_rq = TASK_ON_RQ_MIGRATING;
-		set_task_cpu(p, dest_cpu);
-		raw_spin_unlock(&rq->lock);
-
-		rq = cpu_rq(dest_cpu);
-		raw_spin_lock(&rq->lock);
-		BUG_ON(task_rq(p) != rq);
-		p->on_rq = TASK_ON_RQ_QUEUED;
-		enqueue_task(rq, p, 0);
-		check_preempt_curr(rq, p, 0);
-	}
+	if (task_on_rq_queued(p))
+		rq = move_queued_task(p, dest_cpu);
 done:
 	ret = 1;
 fail:
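Aside (not from the commit): the locking pattern that move_queued_task() factors out — dequeue under the old rq->lock, release it, then take the destination rq's lock to enqueue, returning with the new lock held — can be mimicked in userspace. A rough pthreads analogue; struct runq, move_item() and the counters are made-up names, not kernel API:

#include <pthread.h>
#include <stdio.h>

struct runq {
	pthread_mutex_t lock;
	int nr_items;
};

static struct runq rq_a = { PTHREAD_MUTEX_INITIALIZER, 1 };
static struct runq rq_b = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Caller holds src->lock; returns with dst->lock held instead,
 * so the two locks are never held at the same time. */
static struct runq *move_item(struct runq *src, struct runq *dst)
{
	src->nr_items--;		/* "dequeue" under the source lock */
	pthread_mutex_unlock(&src->lock);

	pthread_mutex_lock(&dst->lock);
	dst->nr_items++;		/* "enqueue" under the destination lock */
	return dst;			/* new queue, still locked */
}

int main(void)
{
	struct runq *rq = &rq_a;

	pthread_mutex_lock(&rq->lock);
	rq = move_item(rq, &rq_b);
	pthread_mutex_unlock(&rq->lock);

	printf("rq_a=%d rq_b=%d\n", rq_a.nr_items, rq_b.nr_items);
	return 0;
}

Never holding both locks at once sidesteps any ordering discipline between two runqueue locks; the price, as in the kernel code, is that the task is briefly on neither queue, which is what the TASK_ON_RQ_MIGRATING marker in the real function covers.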