Commit 52cc6eea authored by Ming Lei, committed by Jens Axboe

block: blk-merge: fast-clone bio when splitting rw bios

Biovecs have been immutable since v3.13, so it isn't necessary
to allocate new biovecs for the cloned bios; this saves one
extra biovec allocation/copy per split, and that allocation is
often not fixed-length and therefore a bit more expensive.
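
(The saving comes from sharing the parent's bvec table: an
immutable-biovec clone only needs its own iterator. A minimal
sketch of the idea, modeled on __bio_clone_fast() in block/bio.c
of this era; the helper name here is illustrative, and field
names follow the v4.x struct bio:

#include <linux/bio.h>

/*
 * Sketch: a "fast" clone of an immutable-biovec bio. Since v3.13
 * a clone never modifies the bvec array, so it can simply point
 * at the parent's table and keep a private iterator -- no
 * variable-length biovec allocation or copy, unlike
 * bio_clone_bioset().
 */
static void sketch_bio_clone_fast(struct bio *clone, struct bio *src)
{
	clone->bi_bdev   = src->bi_bdev;	/* same target device */
	clone->bi_rw     = src->bi_rw;		/* same direction/flags */
	clone->bi_iter   = src->bi_iter;	/* private cursor ... */
	clone->bi_io_vec = src->bi_io_vec;	/* ... over the shared bvec table */
}
)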

For example, if the 'max_sectors_kb' of a null_blk queue is set
to 16 (32 sectors) via sysfs just to force more splits, this patch
increases throughput by about 70% in a sequential read test over
null_blk (direct I/O, bs=1M).

Cc: Christoph Hellwig <hch@infradead.org>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Ming Lin <ming.l@ssi.samsung.com>
Cc: Dongsu Park <dpark@posteo.net>
Signed-off-by: Ming Lei <ming.lei@canonical.com>

This fixes a performance regression introduced by commit 54efd50b,
and allows us to take full advantage of the fact that we have immutable
bio_vecs. Hand applied, as it rejected violently with commit
5014c311.
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6fe810bd
block/blk-merge.c
@@ -66,15 +66,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs)
 {
-	struct bio *split;
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		sectors += bv.bv_len >> 9;
-
-		if (sectors > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
 			goto split;
 
 		/*
@@ -95,6 +92,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			seg_size += bv.bv_len;
 			bvprv = bv;
 			bvprvp = &bv;
+			sectors += bv.bv_len >> 9;
 			continue;
 		}
 new_segment:
@@ -105,21 +103,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		bvprv = bv;
 		bvprvp = &bv;
 		seg_size = bv.bv_len;
+		sectors += bv.bv_len >> 9;
 	}
 
 	return NULL;
 split:
-	split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
-	split->bi_iter.bi_size -= iter.bi_size;
-	bio->bi_iter = iter;
-
-	if (bio_integrity(bio)) {
-		bio_integrity_advance(bio, split->bi_iter.bi_size);
-		bio_integrity_trim(split, 0, bio_sectors(split));
-	}
-
-	return split;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
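
(For reference, bio_split() builds directly on the fast clone:
it clones the bio, truncates the clone to the split point, and
advances the parent past it. This is also why the patch moves the
'sectors' accounting to after the limit checks: on the split path
the counter then holds exactly the sectors that fit, which is what
bio_split() takes as the split point. Roughly, condensed from
block/bio.c of this era, with the BUG_ON sanity checks omitted:

#include <linux/bio.h>

struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	/* Fast clone: shares bio->bi_io_vec, no biovec copy. */
	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	/* The clone covers only the first 'sectors' sectors ... */
	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	/* ... and the parent is advanced past the split point. */
	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
)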