Commit 7ef6b12a authored by Ming Lin, committed by Jens Axboe

md/raid5: split bio for chunk_aligned_read

If a read request fits entirely in a chunk, it will be passed directly to the
underlying device (provided it hasn't failed, of course).  If it doesn't fit,
the slightly less efficient path that uses the stripe_cache is used.
Requests that get to the stripe cache are always completely split up as
necessary.

So with RAID5, ripping out the merge_bvec_fn doesn't cause it to stop working,
but could cause it to take the less efficient path more often.

All that is needed to manage this is for 'chunk_aligned_read' to do some bio
splitting, much like the RAID0 code does (a short sketch of the split
arithmetic follows the commit metadata below).

Cc: Neil Brown <neilb@suse.de>
Cc: linux-raid@vger.kernel.org
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent b49a0871
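
Before the diff itself, a minimal userspace sketch of the split-point
arithmetic the new code relies on: the distance from a request's starting
sector to the next chunk boundary is chunk_sects - (sector & (chunk_sects - 1)),
which works because the chunk size is a power of two. All values below are
made up for illustration; only the expression mirrors the patch.

#include <stdio.h>

/* Distance, in sectors, from 'sector' to the next chunk boundary.
 * Same expression as in the new chunk_aligned_read(); valid only
 * when chunk_sects is a power of two.
 */
static unsigned sectors_to_chunk_boundary(unsigned long long sector,
					  unsigned chunk_sects)
{
	return chunk_sects - (sector & (chunk_sects - 1));
}

int main(void)
{
	unsigned chunk_sects = 128;		/* hypothetical 64KiB chunk with 512B sectors */
	unsigned long long sector = 96;		/* hypothetical starting sector of the read */
	unsigned nr_sectors = 64;		/* hypothetical length of the read */
	unsigned to_boundary = sectors_to_chunk_boundary(sector, chunk_sects);

	/* The patch splits when the request extends past the boundary. */
	printf("%u sectors to the boundary; split needed: %s\n",
	       to_boundary, nr_sectors > to_boundary ? "yes" : "no");
	return 0;
}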
@@ -4800,7 +4800,7 @@ static int bio_fits_rdev(struct bio *bi)
 	return 1;
 }
 
-static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -4809,7 +4809,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	sector_t end_sector;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
-		pr_debug("chunk_aligned_read : non aligned\n");
+		pr_debug("%s: non aligned\n", __func__);
 		return 0;
 	}
 	/*
@@ -4886,6 +4886,31 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	}
 }
 
+static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+{
+	struct bio *split;
+
+	do {
+		sector_t sector = raid_bio->bi_iter.bi_sector;
+		unsigned chunk_sects = mddev->chunk_sectors;
+		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+
+		if (sectors < bio_sectors(raid_bio)) {
+			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
+			bio_chain(split, raid_bio);
+		} else
+			split = raid_bio;
+
+		if (!raid5_read_one_chunk(mddev, split)) {
+			if (split != raid_bio)
+				generic_make_request(raid_bio);
+			return split;
+		}
+	} while (split != raid_bio);
+
+	return NULL;
+}
+
 /* __get_priority_stripe - get the next stripe to process
  *
  * Full stripe writes are allowed to pass preread active stripes up until
@@ -5163,9 +5188,11 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	 * data on failed drives.
 	 */
 	if (rw == READ && mddev->degraded == 0 &&
-	     mddev->reshape_position == MaxSector &&
-	     chunk_aligned_read(mddev,bi))
-		return;
+	    mddev->reshape_position == MaxSector) {
+		bi = chunk_aligned_read(mddev, bi);
+		if (!bi)
+			return;
+	}
 
 	if (unlikely(bi->bi_rw & REQ_DISCARD)) {
 		make_discard_request(mddev, bi);
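
For completeness, a rough userspace simulation of the peel-off loop added
above, showing how a request spanning several chunks would be carved into
chunk-aligned pieces. Submission is replaced by a printf and the
failure/stripe-cache fallback is not modelled; the struct and values are
hypothetical.

#include <stdio.h>

/* Hypothetical stand-in for a bio: a start sector and a length. */
struct demo_bio {
	unsigned long long sector;
	unsigned nr_sectors;
};

int main(void)
{
	unsigned chunk_sects = 128;	/* made-up chunk size in sectors */
	struct demo_bio bio = { .sector = 96, .nr_sectors = 192 };	/* made-up read */

	/* Same shape as the do/while loop in the new chunk_aligned_read():
	 * each iteration peels off at most one chunk's worth of the request.
	 */
	do {
		unsigned sectors = chunk_sects - (bio.sector & (chunk_sects - 1));

		if (sectors > bio.nr_sectors)
			sectors = bio.nr_sectors;	/* final piece fits within a chunk */

		printf("submit piece: sector %llu, %u sectors\n",
		       bio.sector, sectors);

		bio.sector += sectors;
		bio.nr_sectors -= sectors;
	} while (bio.nr_sectors);

	return 0;
}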