Commit 05f1dd53 authored by Jens Axboe

block: add queue flag for disabling SG merging

If devices are not SG starved, we waste a lot of time potentially
collapsing SG segments. Enough that 1.5% of the CPU time goes
to this, at only 400K IOPS. Add a queue flag, QUEUE_FLAG_NO_SG_MERGE,
which just returns the number of vectors in a bio instead of looping
over all segments and checking for collapsible ones.

Add a BLK_MQ_F_SG_MERGE flag so that drivers can opt in to SG
merging, if they so desire.
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4d92a9be
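
To make the shortcut concrete, here is a hedged user-space model of the two counting strategies (not kernel code: struct vec and the count_segments_*() helpers are invented for illustration, and real queue limits are ignored):

#include <stdio.h>

/* Toy stand-in for a bio_vec: a physical address range. */
struct vec { unsigned long phys; unsigned int len; };

/*
 * With merging: adjacent, physically contiguous vectors collapse
 * into one segment (queue limits ignored for brevity).
 */
static unsigned int count_segments_merged(const struct vec *v, int n)
{
	unsigned int segs = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (i && v[i - 1].phys + v[i - 1].len == v[i].phys)
			continue;	/* collapses into previous segment */
		segs++;
	}
	return segs;
}

/*
 * With QUEUE_FLAG_NO_SG_MERGE: every vector is its own segment,
 * i.e. the count is simply the vector count (bi_vcnt).
 */
static unsigned int count_segments_no_merge(const struct vec *v, int n)
{
	(void)v;
	return n;
}

int main(void)
{
	struct vec bio[] = {
		{ 0x1000, 4096 }, { 0x2000, 4096 },	/* contiguous pair */
		{ 0x9000, 4096 },			/* gap: new segment */
	};
	int n = sizeof(bio) / sizeof(bio[0]);

	/* prints "merged: 2, no-merge: 3" */
	printf("merged: %u, no-merge: %u\n",
	       count_segments_merged(bio, n),
	       count_segments_no_merge(bio, n));
	return 0;
}

The no-merge variant is a constant-time lookup, which is where the ~1.5% of CPU time cited above is saved.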
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, high, highprv = 1, no_sg_merge;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
+	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
+	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
+			/*
+			 * If SG merging is disabled, each bio vector is
+			 * a segment
+			 */
+			if (no_sg_merge)
+				goto new_segment;
+
 			/*
 			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since that
-			 * might change with the bounce page.
+			 * never considered part of another segment, since
+			 * that might change with the bounce page.
 			 */
 			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
 			if (!high && !highprv && cluster) {
@@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct bio *nxt = bio->bi_next;
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
+		bio->bi_phys_segments = bio->bi_vcnt;
+	else {
+		struct bio *nxt = bio->bi_next;
 
-	bio->bi_next = NULL;
-	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-	bio->bi_next = nxt;
+		bio->bi_next = NULL;
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+		bio->bi_next = nxt;
+	}
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1829,6 +1829,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	q->mq_ops = set->ops;
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
+	if (!(set->flags & BLK_MQ_F_SG_MERGE))
+		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+
 	q->sg_reserved_size = INT_MAX;
 
 	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
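
As a usage sketch of the opt-in (hypothetical driver, not part of this commit; all example_* names are invented, the blk_mq_tag_set fields and calls match this kernel):

#include <linux/blk-mq.h>
#include <linux/err.h>

static struct blk_mq_ops example_mq_ops;	/* .queue_rq etc. omitted */
static struct blk_mq_tag_set example_tag_set;

static struct request_queue *example_create_queue(void)
{
	struct request_queue *q;
	int ret;

	example_tag_set.ops = &example_mq_ops;
	example_tag_set.nr_hw_queues = 1;
	example_tag_set.queue_depth = 64;
	/*
	 * Opt in to SG merging; leaving BLK_MQ_F_SG_MERGE out makes
	 * blk_mq_init_queue() set QUEUE_FLAG_NO_SG_MERGE, taking the
	 * fast path in blk-merge.c above.
	 */
	example_tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;

	ret = blk_mq_alloc_tag_set(&example_tag_set);
	if (ret)
		return ERR_PTR(ret);

	q = blk_mq_init_queue(&example_tag_set);
	if (IS_ERR(q))
		blk_mq_free_tag_set(&example_tag_set);

	return q;
}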
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -129,6 +129,7 @@ enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
 	BLK_MQ_F_TAG_SHARED	= 1 << 2,
+	BLK_MQ_F_SG_MERGE	= 1 << 3,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -510,6 +510,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
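
For completeness, a driver that manages its own (non-mq) request queue could presumably disable merging by setting the new flag directly; a minimal sketch, assuming the queue_flag_set_unlocked() helper from blkdev.h of this era (the example_* name is invented):

#include <linux/blkdev.h>

/*
 * Hypothetical init fragment: sets bit 21 (QUEUE_FLAG_NO_SG_MERGE)
 * on the driver's queue so __blk_recalc_rq_segments() takes the
 * no-merge path above.
 */
static void example_disable_sg_merge(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
}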