Commit ba54d4d4 authored by Coly Li's avatar Coly Li Committed by Song Liu

raid5: remove gfp flags from scribble_alloc()

Using the GFP_NOIO flag to call scribble_alloc() from resize_chunk() does
not have the expected behavior. kvmalloc_array() inside scribble_alloc(),
which receives the GFP_NOIO flag, will eventually call kmalloc_node() to
allocate physically contiguous pages.

Now that we have the memalloc scope APIs in mddev_suspend()/mddev_resume()
to prevent memory-reclaim I/O during the raid array suspend context,
calling kvmalloc_array() with the GFP_KERNEL flag avoids the deadlock of
recursive I/O as expected.

This patch removes the useless gfp flags from parameters list of
scribble_alloc(), and call kvmalloc_array() with GFP_KERNEL flag. The
incorrect GFP_NOIO flag does not exist anymore.

Fixes: b330e6a4 ("md: convert to kvmalloc")
Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Song Liu <songliubraving@fb.com>
parent 78f57ef9
...@@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num) ...@@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num)
* of the P and Q blocks. * of the P and Q blocks.
*/ */
static int scribble_alloc(struct raid5_percpu *percpu, static int scribble_alloc(struct raid5_percpu *percpu,
int num, int cnt, gfp_t flags) int num, int cnt)
{ {
size_t obj_size = size_t obj_size =
sizeof(struct page *) * (num+2) + sizeof(struct page *) * (num+2) +
sizeof(addr_conv_t) * (num+2); sizeof(addr_conv_t) * (num+2);
void *scribble; void *scribble;
scribble = kvmalloc_array(cnt, obj_size, flags); /*
* If here is in raid array suspend context, it is in memalloc noio
* context as well, there is no potential recursive memory reclaim
* I/Os with the GFP_KERNEL flag.
*/
scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
if (!scribble) if (!scribble)
return -ENOMEM; return -ENOMEM;
...@@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) ...@@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
percpu = per_cpu_ptr(conf->percpu, cpu); percpu = per_cpu_ptr(conf->percpu, cpu);
err = scribble_alloc(percpu, new_disks, err = scribble_alloc(percpu, new_disks,
new_sectors / STRIPE_SECTORS, new_sectors / STRIPE_SECTORS);
GFP_NOIO);
if (err) if (err)
break; break;
} }
...@@ -6759,8 +6763,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu ...@@ -6759,8 +6763,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
conf->previous_raid_disks), conf->previous_raid_disks),
max(conf->chunk_sectors, max(conf->chunk_sectors,
conf->prev_chunk_sectors) conf->prev_chunk_sectors)
/ STRIPE_SECTORS, / STRIPE_SECTORS)) {
GFP_KERNEL)) {
free_scratch_buffer(conf, percpu); free_scratch_buffer(conf, percpu);
return -ENOMEM; return -ENOMEM;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment