Commit 5be9361c authored by Gautham R Shenoy, committed by Linus Torvalds

Eliminate lock_cpu_hotplug in kernel/sched.c

Eliminate lock_cpu_hotplug from kernel/sched.c and use sched_hotcpu_mutex
instead to postpone a hotplug event.

In the migration_call hotcpu callback function, take sched_hotcpu_mutex
while handling the CPU_LOCK_ACQUIRE event and release it while handling
the CPU_LOCK_RELEASE event.
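For context, a minimal sketch (not the full migration_call(); the other
cases are elided) of the notifier pattern this change introduces, against
the 2.6.21-era hotcpu notifier API, in which CPU_LOCK_ACQUIRE is the first
event sent for a hotplug operation and CPU_LOCK_RELEASE the last (these
events were removed from later kernels):

    static DEFINE_MUTEX(sched_hotcpu_mutex);

    static int __cpuinit
    migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
    {
            switch (action) {
            case CPU_LOCK_ACQUIRE:
                    /* First event of a hotplug operation: block readers
                     * such as sched_setaffinity() until the CPU maps are
                     * stable again. */
                    mutex_lock(&sched_hotcpu_mutex);
                    break;

            /* ... CPU_UP_PREPARE, CPU_ONLINE, CPU_DEAD etc. as before ... */

            case CPU_LOCK_RELEASE:
                    /* Last event: the hotplug operation is complete. */
                    mutex_unlock(&sched_hotcpu_mutex);
                    break;
            }
            return NOTIFY_OK;
    }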

[akpm@linux-foundation.org: fix deadlock]
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent baaca49f
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -305,6 +305,7 @@ struct rq {
 };
 
 static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -4520,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
+		mutex_unlock(&sched_hotcpu_mutex);
 		return -ESRCH;
 	}
@@ -4553,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 
 out_unlock:
 	put_task_struct(p);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	return retval;
 }
@@ -4610,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4626,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 
 	if (retval)
 		return retval;
@@ -5388,6 +5389,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
+	case CPU_LOCK_ACQUIRE:
+		mutex_lock(&sched_hotcpu_mutex);
+		break;
+
 	case CPU_UP_PREPARE:
 		p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
 		if (IS_ERR(p))
@@ -5433,7 +5438,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 0);
 
 		/* No need to migrate the tasks: it was best-effort if
-		 * they didn't do lock_cpu_hotplug().  Just wake up
+		 * they didn't take sched_hotcpu_mutex.  Just wake up
 		 * the requestors. */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
@@ -5447,6 +5452,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
+	case CPU_LOCK_RELEASE:
+		mutex_unlock(&sched_hotcpu_mutex);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6822,10 +6830,10 @@ int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 
 	return err;
 }
@@ -6930,12 +6938,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);