Commit 95402b38 authored by Gautham R Shenoy, committed by Ingo Molnar

cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()

This patch converts the known per-subsystem mutexes to get_online_cpus()/put_online_cpus(). It also eliminates the CPU_LOCK_ACQUIRE and
CPU_LOCK_RELEASE hotplug notification events.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 86ef5c9a
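
The conversion follows one pattern in every subsystem touched below: instead of taking a private mutex that kernel/cpu.c had to broadcast via CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE, code that must not race with CPU hotplug now pins the online map with get_online_cpus() and releases it with put_online_cpus(). A minimal sketch of that shape (my_subsys_scan() and its body are illustrative placeholders, not code from this commit):

/*
 * Sketch only: shows the locking conversion this commit applies to
 * sched, workqueue and slab.  my_subsys_scan() is a made-up example.
 */
#include <linux/cpu.h>		/* get_online_cpus(), put_online_cpus() */
#include <linux/cpumask.h>	/* for_each_online_cpu() */

static void my_subsys_scan(void)
{
	int cpu;

	/*
	 * Before: mutex_lock(&my_subsys_mutex), with the subsystem's
	 * hotplug notifier taking the same mutex on CPU_LOCK_ACQUIRE.
	 *
	 * After: pin the set of online CPUs for the duration of the
	 * walk; a concurrent cpu_up()/cpu_down() waits in
	 * cpu_hotplug_begin() until the matching put_online_cpus().
	 */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		/* per-CPU work that must not race with CPU removal */
	}
	put_online_cpus();
}
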
@@ -207,9 +207,7 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
-#define CPU_LOCK_ACQUIRE	0x0008 /* Acquire all hotcpu locks */
-#define CPU_LOCK_RELEASE	0x0009 /* Release all hotcpu locks */
-#define CPU_DYING		0x000A /* CPU (unsigned)v not running any task,
+#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
 					* not handling interrupts, soon dead */
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
...
@@ -218,7 +218,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 	cpu_hotplug_begin();
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
@@ -271,7 +270,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out_release:
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	cpu_hotplug_done();
 	return err;
 }
@@ -302,7 +300,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 	cpu_hotplug_begin();
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
 	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
 				-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
@@ -326,7 +323,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 	if (ret != 0)
 		__raw_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	cpu_hotplug_done();
 	return ret;
...
@@ -439,7 +439,6 @@ struct rq {
 };
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		mutex_unlock(&sched_hotcpu_mutex);
+		put_online_cpus();
 		return -ESRCH;
 	}
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	}
 out_unlock:
 	put_task_struct(p);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return retval;
 }
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 	retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 out_unlock:
 	read_unlock(&tasklist_lock);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&sched_hotcpu_mutex);
-		break;
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&sched_hotcpu_mutex);
-		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
 	int err;
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
...
@@ -67,9 +67,8 @@ struct workqueue_struct {
 #endif
 };
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 static int singlethread_cpu __read_mostly;
@@ -592,8 +591,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -605,7 +602,7 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
-	preempt_disable();		/* CPU hotplug */
+	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
@@ -613,8 +610,8 @@ int schedule_on_each_cpu(work_func_t func)
 		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
-	preempt_enable();
 	flush_workqueue(keventd_wq);
+	put_online_cpus();
 	free_percpu(works);
 	return 0;
 }
@@ -750,8 +747,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		mutex_lock(&workqueue_mutex);
+		get_online_cpus();
+		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
+		spin_unlock(&workqueue_lock);
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
@@ -760,7 +759,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		mutex_unlock(&workqueue_mutex);
+		put_online_cpus();
 	}
 	if (err) {
@@ -775,7 +774,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * workqueue_mutex protects cwq->thread
+	 * get_online_cpus() protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -810,9 +809,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	struct cpu_workqueue_struct *cwq;
 	int cpu;
-	mutex_lock(&workqueue_mutex);
+	get_online_cpus();
+	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
-	mutex_unlock(&workqueue_mutex);
+	spin_unlock(&workqueue_lock);
+	put_online_cpus();
 	for_each_cpu_mask(cpu, *cpu_map) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -835,13 +836,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&workqueue_mutex);
-		return NOTIFY_OK;
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&workqueue_mutex);
-		return NOTIFY_OK;
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
@@ -854,7 +848,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		case CPU_UP_PREPARE:
 			if (!create_workqueue_thread(cwq, cpu))
 				break;
-			printk(KERN_ERR "workqueue for %i failed\n", cpu);
+			printk(KERN_ERR "workqueue [%s] for %i failed\n",
+				wq->name, cpu);
 			return NOTIFY_BAD;
 		case CPU_ONLINE:
...
@@ -730,8 +730,7 @@ static inline void init_lock_keys(void)
 #endif
 /*
- * 1. Guard access to the cache-chain.
- * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ * Guard access to the cache-chain.
 */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1331,12 +1330,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	int err = 0;
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&cache_chain_mutex);
-		break;
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
+		mutex_lock(&cache_chain_mutex);
 		err = cpuup_prepare(cpu);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1373,9 +1371,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
+		mutex_lock(&cache_chain_mutex);
 		cpuup_canceled(cpu);
-		break;
-	case CPU_LOCK_RELEASE:
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
@@ -2170,6 +2167,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_map as well. Please see cpuup_callback
 	 */
+	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(pc, &cache_chain, next) {
@@ -2396,6 +2394,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
 	mutex_unlock(&cache_chain_mutex);
+	put_online_cpus();
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2547,9 +2546,11 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	int ret;
 	BUG_ON(!cachep || in_interrupt());
+	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
 	ret = __cache_shrink(cachep);
 	mutex_unlock(&cache_chain_mutex);
+	put_online_cpus();
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
@@ -2575,6 +2576,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 	BUG_ON(!cachep || in_interrupt());
 	/* Find the cache in the chain of caches. */
+	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
@@ -2584,6 +2586,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 		slab_error(cachep, "Can't free all objects");
 		list_add(&cachep->next, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
 		return;
 	}
@@ -2592,6 +2595,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 	__kmem_cache_destroy(cachep);
 	mutex_unlock(&cache_chain_mutex);
+	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
...