Commit 5d790751 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] lock_cpu_hotplug only if CONFIG_CPU_HOTPLUG

From: Rusty Russell <rusty@rustcorp.com.au>

The cpucontrol mutex is not required when no cpus can go up and down.
Andrew wrote a wrapper for it to avoid #ifdefs; this patch expands that wrapper so it is only defined as a real lock for CONFIG_HOTPLUG_CPU, and uses it everywhere.

The only downside is that the cpucontrol lock was overloaded by my recent
patch to net/core/flow.c to protect it from reentrance, so this
reintroduces the local flow_flush_sem.  This code isn't speed critical, so
taking two locks when CONFIG_HOTPLUG_CPU=y is not really an issue.
parent 93a2d85f
......@@ -38,9 +38,6 @@ extern void unregister_cpu_notifier(struct notifier_block *nb);
int cpu_up(unsigned int cpu);
#define lock_cpu_hotplug() down(&cpucontrol)
#define unlock_cpu_hotplug() up(&cpucontrol)
#else
static inline int register_cpu_notifier(struct notifier_block *nb)
......@@ -51,12 +48,17 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
#define lock_cpu_hotplug() do { } while (0)
#define unlock_cpu_hotplug() do { } while (0)
#endif /* CONFIG_SMP */
extern struct sysdev_class cpu_sysdev_class;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern struct semaphore cpucontrol;
#define lock_cpu_hotplug() down(&cpucontrol)
#define unlock_cpu_hotplug() up(&cpucontrol)
#else
#define lock_cpu_hotplug() do { } while (0)
#define unlock_cpu_hotplug() do { } while (0)
#endif
#endif /* _LINUX_CPU_H_ */
......@@ -554,7 +554,7 @@ static int stop_refcounts(void)
stopref_state = STOPREF_WAIT;
/* No CPUs can come up or down during this. */
down(&cpucontrol);
lock_cpu_hotplug();
for (i = 0; i < NR_CPUS; i++) {
if (i == cpu || !cpu_online(i))
......@@ -572,7 +572,7 @@ static int stop_refcounts(void)
/* If some failed, kill them all. */
if (ret < 0) {
stopref_set_state(STOPREF_EXIT, 1);
up(&cpucontrol);
unlock_cpu_hotplug();
return ret;
}
......@@ -595,7 +595,7 @@ static void restart_refcounts(void)
stopref_set_state(STOPREF_EXIT, 0);
local_irq_enable();
preempt_enable();
up(&cpucontrol);
unlock_cpu_hotplug();
}
#else /* ...!SMP */
static inline int stop_refcounts(void)
......
......@@ -283,10 +283,11 @@ static void flow_cache_flush_per_cpu(void *data)
void flow_cache_flush(void)
{
struct flow_flush_info info;
static DECLARE_MUTEX(flow_flush_sem);
/* Don't want cpus going down or up during this, also protects
* against multiple callers. */
/* Don't want cpus going down or up during this. */
lock_cpu_hotplug();
down(&flow_flush_sem);
atomic_set(&info.cpuleft, num_online_cpus());
init_completion(&info.completion);
......@@ -296,6 +297,7 @@ void flow_cache_flush(void)
local_bh_enable();
wait_for_completion(&info.completion);
up(&flow_flush_sem);
unlock_cpu_hotplug();
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment