Commit 208410b5 authored by Shaohua Li

md/raid1/10: reset bio allocated from mempool

Data allocated from a mempool doesn't always get initialized; this happens
when the object is reused from the pool rather than freshly allocated. In the
raid1/10 case, we must reinitialize the bios.
Reported-by: Jonathan G. Underwood <jonathan.underwood@gmail.com>
Fixes: f0250618 ("md: raid10: don't use bio's vec table to manage resync pages")
Fixes: 98d30c58 ("md: raid1: don't use bio's vec table to manage resync pages")
Cc: stable@vger.kernel.org (4.12+)
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent 9c72a18e
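The failure mode is generic to mempool-style allocators: an object returned to
the pool keeps whatever state its last user left in it, and a later
mempool_alloc() may hand that same object straight back without running any
constructor. Below is a self-contained userspace sketch of the pitfall; the
toy freelist pool and its names (struct buf, pool_alloc, pool_free) are
illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* A toy object with per-use state that must be fresh on every use. */
struct buf {
	int in_flight;      /* stale if not reset on reuse */
	struct buf *next;   /* freelist linkage */
};

static struct buf *freelist;

/* Hand back a recycled object if one exists, else allocate fresh. */
static struct buf *pool_alloc(void)
{
	struct buf *b = freelist;
	if (b) {
		freelist = b->next;
		return b;        /* BUG: carries its previous state */
	}
	return calloc(1, sizeof(*b));
}

static void pool_free(struct buf *b)
{
	b->next = freelist;
	freelist = b;
}

int main(void)
{
	struct buf *b = pool_alloc();
	b->in_flight = 1;           /* first user dirties the object */
	pool_free(b);

	b = pool_alloc();           /* reuse: no constructor runs */
	printf("in_flight = %d\n", b->in_flight);  /* prints 1, not 0 */
	return 0;
}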
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2564,6 +2564,23 @@ static int init_resync(struct r1conf *conf)
 	return 0;
 }
 
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
+{
+	struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+	struct resync_pages *rps;
+	struct bio *bio;
+	int i;
+
+	for (i = conf->poolinfo->raid_disks; i--; ) {
+		bio = r1bio->bios[i];
+		rps = bio->bi_private;
+		bio_reset(bio);
+		bio->bi_private = rps;
+	}
+	r1bio->master_bio = NULL;
+	return r1bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
@@ -2649,7 +2666,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	bitmap_cond_end_sync(mddev->bitmap, sector_nr,
 		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
 
-	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+	r1_bio = raid1_alloc_init_r1buf(conf);
 
 	raise_barrier(conf, sector_nr);
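Note why the helper above saves and restores ->bi_private: bio_reset() wipes
nearly all of struct bio, including bi_private, but the resync_pages pointer
installed at pool-allocation time must survive the reset. A minimal standalone
sketch of that save/reset/restore pattern, with struct obj and reset_obj() as
hypothetical stand-ins for struct bio and bio_reset():

#include <string.h>
#include <assert.h>

struct obj {
	int state;       /* per-use state that must be wiped */
	void *priv;      /* long-lived pointer that must survive */
};

/* Stand-in for bio_reset(): wipes the whole object. */
static void reset_obj(struct obj *o)
{
	memset(o, 0, sizeof(*o));
}

/* The pattern from raid1_alloc_init_r1buf(): save, reset, restore. */
static void reset_keep_priv(struct obj *o)
{
	void *priv = o->priv;   /* save the resync_pages analogue */
	reset_obj(o);           /* clears everything, priv included */
	o->priv = priv;         /* restore */
}

int main(void)
{
	int pages;
	struct obj o = { .state = 42, .priv = &pages };

	reset_keep_priv(&o);
	assert(o.state == 0 && o.priv == &pages);
	return 0;
}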
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2798,6 +2798,35 @@ static int init_resync(struct r10conf *conf)
 	return 0;
 }
 
+static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
+{
+	struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+	struct resync_pages *rp;
+	struct bio *bio;
+	int nalloc;
+	int i;
+
+	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
+		nalloc = conf->copies; /* resync */
+	else
+		nalloc = 2; /* recovery */
+
+	for (i = 0; i < nalloc; i++) {
+		bio = r10bio->devs[i].bio;
+		rp = bio->bi_private;
+		bio_reset(bio);
+		bio->bi_private = rp;
+		bio = r10bio->devs[i].repl_bio;
+		if (bio) {
+			rp = bio->bi_private;
+			bio_reset(bio);
+			bio->bi_private = rp;
+		}
+	}
+	return r10bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
@@ -3027,7 +3056,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				atomic_inc(&mreplace->nr_pending);
 			rcu_read_unlock();
 
-			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+			r10_bio = raid10_alloc_init_r10buf(conf);
 			r10_bio->state = 0;
 			raise_barrier(conf, rb2 != NULL);
 			atomic_set(&r10_bio->remaining, 0);
@@ -3236,7 +3265,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		if (sync_blocks < max_sync)
 			max_sync = sync_blocks;
-		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+		r10_bio = raid10_alloc_init_r10buf(conf);
 		r10_bio->state = 0;
 
 		r10_bio->mddev = mddev;
@@ -4360,7 +4389,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 read_more:
 	/* Now schedule reads for blocks from sector_nr to last */
-	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+	r10_bio = raid10_alloc_init_r10buf(conf);
 	r10_bio->state = 0;
 	raise_barrier(conf, sectors_done != 0);
 	atomic_set(&r10_bio->remaining, 0);
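In the raid10 helper, nalloc mirrors the allocation logic in
r10buf_pool_alloc(): a resync or reshape keeps one bio in flight per copy,
while a recovery only uses a read/write pair, so only those bios exist to be
reset. A compact standalone sketch of that selection, with simplified
stand-ins for the MD_RECOVERY_* flags:

#include <stdbool.h>
#include <stdio.h>

struct mode {
	bool sync;     /* stand-in for MD_RECOVERY_SYNC */
	bool reshape;  /* stand-in for MD_RECOVERY_RESHAPE */
};

/*
 * Bios per r10bio that the pool handed out and that therefore need
 * resetting: one per copy during resync/reshape, but only the
 * read/write pair while recovering a single failed device.
 */
static int bios_to_reset(const struct mode *m, int copies)
{
	return (m->sync || m->reshape) ? copies : 2;
}

int main(void)
{
	struct mode resync = { .sync = true }, recovery = { 0 };

	printf("resync: %d\n", bios_to_reset(&resync, 3));     /* 3 */
	printf("recovery: %d\n", bios_to_reset(&recovery, 3)); /* 2 */
	return 0;
}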