Commit 6869875f authored by Christoph Hellwig, committed by Jens Axboe

block: remove the bi_seg_{front,back}_size fields in struct bio

At this point these fields aren't used for anything, so we can remove
them.
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 200a9aff
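
For context before the diff: the only bookkeeping bvec_split_segs() keeps after this change is the running segment count and (optionally) the sector count; the front/back segment size tracking goes away entirely. The stand-alone user-space sketch below models just that counting loop under simplifying assumptions (a fixed maximum segment size, no per-offset limit from get_max_segment_size()). The split_segs() helper and its constants are illustrative stand-ins, not the kernel implementation.

    /*
     * Rough user-space model of the counting that bvec_split_segs() still does
     * after this patch: accumulate a segment count and (optionally) a sector
     * count, with no front/back segment size bookkeeping.  MAX_SEGMENT_SIZE and
     * split_segs() are illustrative stand-ins, not kernel code.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define SECTOR_SHIFT     9
    #define MAX_SEGMENT_SIZE 4096   /* stand-in for queue_max_segment_size() */

    static bool split_segs(unsigned len, unsigned *nsegs, unsigned *sectors,
                           unsigned max_segs)
    {
            unsigned total_len = 0, new_nsegs = 0;

            while (len && *nsegs + new_nsegs < max_segs) {
                    unsigned seg = len < MAX_SEGMENT_SIZE ? len : MAX_SEGMENT_SIZE;

                    new_nsegs++;
                    total_len += seg;
                    len -= seg;
            }

            if (new_nsegs) {
                    *nsegs += new_nsegs;
                    if (sectors)
                            *sectors += total_len >> SECTOR_SHIFT;
            }

            /* a non-zero remainder means the caller would have to split here */
            return len != 0;
    }

    int main(void)
    {
            unsigned nsegs = 0, sectors = 0;

            split_segs(3 * 4096 + 512, &nsegs, &sectors, 128);
            printf("nsegs=%u sectors=%u\n", nsegs, sectors);  /* nsegs=4 sectors=25 */
            return 0;
    }
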
@@ -162,8 +162,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
  * variables.
  */
 static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
-		unsigned *nsegs, unsigned *last_seg_size,
-		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
+		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
 {
 	unsigned len = bv->bv_len;
 	unsigned total_len = 0;
@@ -185,28 +184,12 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 			break;
 	}
 
-	if (!new_nsegs)
-		return !!len;
-
-	/* update front segment size */
-	if (!*nsegs) {
-		unsigned first_seg_size;
-
-		if (new_nsegs == 1)
-			first_seg_size = get_max_segment_size(q, bv->bv_offset);
-		else
-			first_seg_size = queue_max_segment_size(q);
-
-		if (*front_seg_size < first_seg_size)
-			*front_seg_size = first_seg_size;
+	if (new_nsegs) {
+		*nsegs += new_nsegs;
+		if (sectors)
+			*sectors += total_len >> 9;
 	}
 
-	/* update other varibles */
-	*last_seg_size = seg_size;
-	*nsegs += new_nsegs;
-	if (sectors)
-		*sectors += total_len >> 9;
-
 	/* split in the middle of the bvec if len != 0 */
 	return !!len;
 }
@@ -218,8 +201,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
-	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	unsigned front_seg_size = bio->bi_seg_front_size;
+	unsigned nsegs = 0, sectors = 0;
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
@@ -243,8 +225,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			/* split in the middle of bvec */
 			bv.bv_len = (max_sectors - sectors) << 9;
 			bvec_split_segs(q, &bv, &nsegs,
-					&seg_size,
-					&front_seg_size,
 					&sectors, max_segs);
 		}
 		goto split;
@@ -258,12 +238,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 			nsegs++;
-			seg_size = bv.bv_len;
 			sectors += bv.bv_len >> 9;
-			if (nsegs == 1 && seg_size > front_seg_size)
-				front_seg_size = seg_size;
-		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
-				&front_seg_size, &sectors, max_segs)) {
+		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+				max_segs)) {
 			goto split;
 		}
 	}
@@ -278,10 +255,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		bio = new;
 	}
 
-	bio->bi_seg_front_size = front_seg_size;
-	if (seg_size > bio->bi_seg_back_size)
-		bio->bi_seg_back_size = seg_size;
-
 	return do_split ? new : NULL;
 }
 
@@ -336,17 +309,13 @@ EXPORT_SYMBOL(blk_queue_split);
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	struct bio_vec uninitialized_var(bv), bvprv = { NULL };
-	unsigned int seg_size, nr_phys_segs;
-	unsigned front_seg_size;
-	struct bio *fbio, *bbio;
+	unsigned int nr_phys_segs = 0;
 	struct bvec_iter iter;
+	struct bio_vec bv;
 
 	if (!bio)
 		return 0;
 
-	front_seg_size = bio->bi_seg_front_size;
-
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
@@ -356,23 +325,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 1;
 	}
 
-	fbio = bio;
-	seg_size = 0;
-	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_bvec(bv, bio, iter) {
-			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
-					&front_seg_size, NULL, UINT_MAX);
-		}
-
-		bbio = bio;
-		if (likely(bio->bi_iter.bi_size))
-			bvprv = bv;
+		bio_for_each_bvec(bv, bio, iter)
+			bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
 	}
 
-	fbio->bi_seg_front_size = front_seg_size;
-	if (seg_size > bbio->bi_seg_back_size)
-		bbio->bi_seg_back_size = seg_size;
-
 	return nr_phys_segs;
 }
 
@@ -392,24 +349,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	bio_set_flag(bio, BIO_SEG_VALID);
 }
 
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
-				   struct bio *nxt)
-{
-	struct bio_vec end_bv = { NULL }, nxt_bv;
-
-	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    queue_max_segment_size(q))
-		return 0;
-
-	if (!bio_has_data(bio))
-		return 1;
-
-	bio_get_last_bvec(bio, &end_bv);
-	bio_get_first_bvec(nxt, &nxt_bv);
-
-	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
-}
-
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
@@ -669,8 +608,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
-	unsigned int seg_size =
-		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
 	if (req_gap_back_merge(req, next->bio))
 		return 0;
@@ -683,13 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
-		if (req->nr_phys_segments == 1)
-			req->bio->bi_seg_front_size = seg_size;
-		if (next->nr_phys_segments == 1)
-			next->biotail->bi_seg_back_size = seg_size;
-	}
-
 	if (total_phys_segments > queue_max_segments(q))
 		return 0;
...
@@ -159,13 +159,6 @@ struct bio {
 	 */
 	unsigned int		bi_phys_segments;
 
-	/*
-	 * To keep track of the max segment size, we account for the
-	 * sizes of the first and last mergeable segments in this bio.
-	 */
-	unsigned int		bi_seg_front_size;
-	unsigned int		bi_seg_back_size;
-
 	struct bvec_iter	bi_iter;
 
 	atomic_t		__bi_remaining;
...