Commit cd6f6b70 authored by Oleg Nesterov, committed by Greg Kroah-Hartman

md/raid5: Fix CPU hotplug callback registration

commit 789b5e03 upstream.

Subsystems that want to register CPU hotplug callbacks, as well as perform
initialization for the CPUs that are already online, often do it as shown
below:

	get_online_cpus();

	for_each_online_cpu(cpu)
		init_cpu(cpu);

	register_cpu_notifier(&foobar_cpu_notifier);

	put_online_cpus();

This is wrong, since it is prone to ABBA deadlocks involving the
cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently
with CPU hotplug operations).
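
Roughly, the problematic interleaving looks like this (a simplified sketch;
lock and function names are from the CPU hotplug core of this era, and the
real call chains are longer):

	CPU 0 (subsystem init, as above)     CPU 1 (CPU hotplug, e.g. cpu_down())
	--------------------------------     ------------------------------------
	get_online_cpus();
	  [takes a reader reference on
	   cpu_hotplug.lock]
	                                     cpu_maps_update_begin();
	                                       [takes cpu_add_remove_lock]
	register_cpu_notifier(...);
	  [blocks on cpu_add_remove_lock]
	                                     cpu_hotplug_begin();
	                                       [waits for cpu_hotplug.lock
	                                        readers to go away]

Neither side can make progress: CPU 0 blocks on the lock CPU 1 holds, while
CPU 1 waits for CPU 0's reader reference to be dropped.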

Interestingly, the raid5 code can actually prevent double initialization and
hence can use the following simplified form of callback registration:

	register_cpu_notifier(&foobar_cpu_notifier);

	get_online_cpus();

	for_each_online_cpu(cpu)
		init_cpu(cpu);

	put_online_cpus();

A hotplug operation that occurs between registering the notifier and calling
get_online_cpus() won't disrupt anything, because the code takes care to
perform the memory allocations only once.
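
That "only once" property comes from the alloc_scratch_buffer() helper
introduced in the diff below, which only allocates what is still missing, so
running it twice for the same CPU (once from the notifier, once from the
initialization loop) is harmless:

	if (conf->level == 6 && !percpu->spare_page)
		percpu->spare_page = alloc_page(GFP_KERNEL);
	if (!percpu->scribble)
		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);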

So reorganize the code in raid5 this way to fix the deadlock with callback
registration.

Cc: linux-raid@vger.kernel.org
Fixes: 36d1c647
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
[Srivatsa: Fixed the unregister_cpu_notifier() deadlock, added the
free_scratch_buffer() helper to condense code further and wrote the changelog.]
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5a0d43c3
drivers/md/raid5.c

@@ -4678,23 +4678,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	safe_put_page(percpu->spare_page);
+	kfree(percpu->scribble);
+	percpu->spare_page = NULL;
+	percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	if (conf->level == 6 && !percpu->spare_page)
+		percpu->spare_page = alloc_page(GFP_KERNEL);
+	if (!percpu->scribble)
+		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+		free_scratch_buffer(conf, percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
 	if (!conf->percpu)
 		return;
 
-	get_online_cpus();
-	for_each_possible_cpu(cpu) {
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu)
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 	put_online_cpus();
 
 	free_percpu(conf->percpu);
@@ -4720,15 +4740,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (conf->level == 6 && !percpu->spare_page)
-			percpu->spare_page = alloc_page(GFP_KERNEL);
-		if (!percpu->scribble)
-			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-		if (!percpu->scribble ||
-		    (conf->level == 6 && !percpu->spare_page)) {
-			safe_put_page(percpu->spare_page);
-			kfree(percpu->scribble);
+		if (alloc_scratch_buffer(conf, percpu)) {
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
 			return notifier_from_errno(-ENOMEM);
@@ -4736,10 +4748,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-		percpu->spare_page = NULL;
-		percpu->scribble = NULL;
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 		break;
 	default:
 		break;
@@ -4751,40 +4760,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
 	unsigned long cpu;
-	struct page *spare_page;
-	struct raid5_percpu __percpu *allcpus;
-	void *scribble;
-	int err;
+	int err = 0;
 
-	allcpus = alloc_percpu(struct raid5_percpu);
-	if (!allcpus)
+	conf->percpu = alloc_percpu(struct raid5_percpu);
+	if (!conf->percpu)
 		return -ENOMEM;
-	conf->percpu = allcpus;
-
-	get_online_cpus();
-	err = 0;
-	for_each_present_cpu(cpu) {
-		if (conf->level == 6) {
-			spare_page = alloc_page(GFP_KERNEL);
-			if (!spare_page) {
-				err = -ENOMEM;
-				break;
-			}
-			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-		}
-		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-		if (!scribble) {
-			err = -ENOMEM;
-			break;
-		}
-		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
-	}
+
 #ifdef CONFIG_HOTPLUG_CPU
 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
 	conf->cpu_notify.priority = 0;
-	if (err == 0)
-		err = register_cpu_notifier(&conf->cpu_notify);
+	err = register_cpu_notifier(&conf->cpu_notify);
+	if (err)
+		return err;
 #endif
+
+	get_online_cpus();
+	for_each_present_cpu(cpu) {
+		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+		if (err) {
+			pr_err("%s: failed memory allocation for cpu%ld\n",
+			       __func__, cpu);
+			break;
+		}
+	}
 	put_online_cpus();
 
 	return err;