Commit faab2e12 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] set_cpus_allowed locking

From: Nick Piggin <piggin@cyberone.com.au>,
      Rusty Russell <rusty@rustcorp.com.au>

Fixes a race in which sys_sched_setaffinity could run concurrently with
sched_migrate_task, causing sched_migrate_task to restore a stale
(no-longer-valid) cpu mask.
parent af7846e4
...@@ -540,6 +540,43 @@ inline int task_curr(task_t *p) ...@@ -540,6 +540,43 @@ inline int task_curr(task_t *p)
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/*
 * A request queued on a runqueue's migration_queue, asking that CPU's
 * migration thread to move 'task' to an allowed CPU; the requester
 * blocks on 'done' until the migration thread has finished.
 */
typedef struct {
struct list_head list;
task_t *task;
struct completion done;
} migration_req_t;
/*
 * The task's runqueue lock must be held, and the new mask must be valid.
 * Returns true if you have to wait for migration thread.
 *
 * 'req' is filled in and queued only on the return-1 path; a caller may
 * pass NULL iff it knows one of the return-0 paths below will be taken
 * (sched_migrate_task relies on this when restoring the old mask, since
 * the current CPU is then already allowed — see its BUG_ON).
 */
static int __set_cpus_allowed(task_t *p, cpumask_t new_mask,
migration_req_t *req)
{
runqueue_t *rq = task_rq(p);
/* Publish the new mask under rq->lock so it cannot race with a
 * concurrent affinity change (the bug this patch fixes). */
p->cpus_allowed = new_mask;
/*
 * Can the task run on the task's current CPU? If not then
 * migrate the thread off to a proper CPU.
 */
if (cpu_isset(task_cpu(p), new_mask))
return 0;
/*
 * If the task is not on a runqueue (and not running), then
 * it is sufficient to simply update the task's cpu field.
 */
if (!p->array && !task_running(rq, p)) {
set_task_cpu(p, any_online_cpu(p->cpus_allowed));
return 0;
}
/* Task is running or runnable: hand off to this runqueue's
 * migration thread.  Caller must drop rq->lock, wake the thread
 * and wait on req->done. */
init_completion(&req->done);
req->task = p;
list_add(&req->list, &rq->migration_queue);
return 1;
}
/* /*
* wait_task_inactive - wait for a thread to unschedule. * wait_task_inactive - wait for a thread to unschedule.
...@@ -971,16 +1008,34 @@ static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) ...@@ -971,16 +1008,34 @@ static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
*/ */
static void sched_migrate_task(task_t *p, int dest_cpu) static void sched_migrate_task(task_t *p, int dest_cpu)
{ {
cpumask_t old_mask; runqueue_t *rq;
migration_req_t req;
unsigned long flags;
cpumask_t old_mask, new_mask = cpumask_of_cpu(dest_cpu);
rq = task_rq_lock(p, &flags);
old_mask = p->cpus_allowed; old_mask = p->cpus_allowed;
if (!cpu_isset(dest_cpu, old_mask)) if (!cpu_isset(dest_cpu, old_mask) || !cpu_online(dest_cpu))
return; goto out;
/* force the process onto the specified CPU */
set_cpus_allowed(p, cpumask_of_cpu(dest_cpu));
/* restore the cpus allowed mask */ /* force the process onto the specified CPU */
set_cpus_allowed(p, old_mask); if (__set_cpus_allowed(p, new_mask, &req)) {
/* Need to wait for migration thread. */
task_rq_unlock(rq, &flags);
wake_up_process(rq->migration_thread);
wait_for_completion(&req.done);
/* If we raced with sys_sched_setaffinity, don't
* restore mask. */
rq = task_rq_lock(p, &flags);
if (likely(cpus_equal(p->cpus_allowed, new_mask))) {
/* Restore old mask: won't need migration
* thread, since current cpu is allowed. */
BUG_ON(__set_cpus_allowed(p, old_mask, NULL));
}
}
out:
task_rq_unlock(rq, &flags);
} }
/* /*
...@@ -2628,12 +2683,6 @@ void __init init_idle(task_t *idle, int cpu) ...@@ -2628,12 +2683,6 @@ void __init init_idle(task_t *idle, int cpu)
* 7) we wake up and the migration is done. * 7) we wake up and the migration is done.
*/ */
typedef struct {
struct list_head list;
task_t *task;
struct completion done;
} migration_req_t;
/* /*
* Change a given task's CPU affinity. Migrate the thread to a * Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on * proper CPU and schedule it away if the CPU it's executing on
...@@ -2646,40 +2695,26 @@ typedef struct { ...@@ -2646,40 +2695,26 @@ typedef struct {
int set_cpus_allowed(task_t *p, cpumask_t new_mask) int set_cpus_allowed(task_t *p, cpumask_t new_mask)
{ {
unsigned long flags; unsigned long flags;
int ret = 0;
migration_req_t req; migration_req_t req;
runqueue_t *rq; runqueue_t *rq;
if (any_online_cpu(new_mask) == NR_CPUS)
return -EINVAL;
rq = task_rq_lock(p, &flags); rq = task_rq_lock(p, &flags);
p->cpus_allowed = new_mask; if (any_online_cpu(new_mask) == NR_CPUS) {
/* ret = -EINVAL;
* Can the task run on the task's current CPU? If not then goto out;
* migrate the thread off to a proper CPU.
*/
if (cpu_isset(task_cpu(p), new_mask)) {
task_rq_unlock(rq, &flags);
return 0;
} }
/*
* If the task is not on a runqueue (and not running), then if (__set_cpus_allowed(p, new_mask, &req)) {
* it is sufficient to simply update the task's cpu field. /* Need help from migration thread: drop lock and wait. */
*/
if (!p->array && !task_running(rq, p)) {
set_task_cpu(p, any_online_cpu(p->cpus_allowed));
task_rq_unlock(rq, &flags); task_rq_unlock(rq, &flags);
wake_up_process(rq->migration_thread);
wait_for_completion(&req.done);
return 0; return 0;
} }
init_completion(&req.done); out:
req.task = p;
list_add(&req.list, &rq->migration_queue);
task_rq_unlock(rq, &flags); task_rq_unlock(rq, &flags);
return ret;
wake_up_process(rq->migration_thread);
wait_for_completion(&req.done);
return 0;
} }
EXPORT_SYMBOL_GPL(set_cpus_allowed); EXPORT_SYMBOL_GPL(set_cpus_allowed);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment