Commit 900e0807 authored by Jens Axboe

block: move queue enter logic into blk_mq_submit_bio()

Retain the old logic for the fops based submit, but for our internal
blk_mq_submit_bio(), move the queue entering logic into the core
function itself.

We need to be a bit careful if going into the scheduler, as the scheduler
or the queue mappings can arbitrarily change before we have entered the queue.
Have the bio scheduler merge path enter the queue on its own; it's a very cheap
operation compared to actually doing the merge locking and lookups.
Reviewed-by: Christoph Hellwig <hch@lst.de>
[axboe: update to check merge post submit_bio_checks() doing remap...]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c98cb5bb
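
For orientation before reading the diff: the submission split this commit creates in block/blk-core.c boils down to the condensed end-state sketch below. It is distilled from the hunks that follow as a reading aid, not taken verbatim from the patch.

/*
 * Condensed end state of block/blk-core.c after this commit.
 */
static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
        /* fops-based ->submit_bio drivers keep the old behaviour:
         * enter the queue up front, exit it once the bio is handed off.
         */
        if (unlikely(bio_queue_enter(bio) != 0))
                return;
        if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
                disk->fops->submit_bio(bio);
        blk_queue_exit(disk->queue);
}

static void __submit_bio(struct bio *bio)
{
        struct gendisk *disk = bio->bi_bdev->bd_disk;

        /* blk-mq no longer enters the queue here; the reference is taken
         * inside blk_mq_submit_bio()'s request allocation path instead
         * (and by blk_mq_sched_bio_merge() for the merge lookups).
         */
        if (!disk->fops->submit_bio)
                blk_mq_submit_bio(bio);
        else
                __submit_bio_fops(disk, bio);
}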
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -744,7 +744,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
         return BLK_STS_OK;
 }
 
-static noinline_for_stack bool submit_bio_checks(struct bio *bio)
+noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
         struct block_device *bdev = bio->bi_bdev;
         struct request_queue *q = bdev_get_queue(bdev);
@@ -862,24 +862,25 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
         return false;
 }
 
-static void __submit_bio(struct bio *bio)
+static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
 {
-        struct gendisk *disk = bio->bi_bdev->bd_disk;
-
         if (unlikely(bio_queue_enter(bio) != 0))
                 return;
-
-        if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
-                goto queue_exit;
-        if (!disk->fops->submit_bio) {
-                blk_mq_submit_bio(bio);
-                return;
-        }
-        disk->fops->submit_bio(bio);
-queue_exit:
+        if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
+                disk->fops->submit_bio(bio);
         blk_queue_exit(disk->queue);
 }
 
+static void __submit_bio(struct bio *bio)
+{
+        struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+        if (!disk->fops->submit_bio)
+                blk_mq_submit_bio(bio);
+        else
+                __submit_bio_fops(disk, bio);
+}
+
 /*
  * The loop in this function may be a bit non-obvious, and so deserves some
  * explanation:
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -370,15 +370,20 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
         bool ret = false;
         enum hctx_type type;
 
-        if (e && e->type->ops.bio_merge)
-                return e->type->ops.bio_merge(q, bio, nr_segs);
+        if (bio_queue_enter(bio))
+                return false;
+
+        if (e && e->type->ops.bio_merge) {
+                ret = e->type->ops.bio_merge(q, bio, nr_segs);
+                goto out_put;
+        }
 
         ctx = blk_mq_get_ctx(q);
         hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
         type = hctx->type;
         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
             list_empty_careful(&ctx->rq_lists[type]))
-                return false;
+                goto out_put;
 
         /* default per sw-queue merge */
         spin_lock(&ctx->lock);
@@ -391,6 +396,8 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                 ret = true;
 
         spin_unlock(&ctx->lock);
+out_put:
+        blk_queue_exit(q);
         return ret;
 }
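
The hunk above is what the second paragraph of the commit message refers to: blk_mq_sched_bio_merge() may now be reached without the submitter having entered the queue (for example from the cached plug request path added in blk-mq.c further down), so it brackets the elevator and ctx/hctx lookups with its own queue reference. A condensed pairing of the two sides, distilled from this commit's hunks (middle of the merge function elided):

/*
 * block/blk-mq-sched.c: the merge lookups now sit inside an
 * enter/exit bracket owned by this function.
 */
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                            unsigned int nr_segs)
{
        bool ret = false;

        /* entering the queue is cheap next to the merge locking/lookups */
        if (bio_queue_enter(bio))
                return false;

        /* ... elevator ->bio_merge() or the default per-sw-queue merge,
         * as before, except failure paths now jump to out_put ...
         */

out_put:
        blk_queue_exit(q);      /* drop the reference taken above */
        return ret;
}

/*
 * block/blk-mq.c: new helper added further down in this commit; it can
 * run before the submitter holds a queue reference (cached plug request
 * path), which is why the bracket above exists.
 */
static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
                                  unsigned int nr_segs, bool *same_queue_rq)
{
        if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
                if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
                        return true;
                if (blk_mq_sched_bio_merge(q, bio, nr_segs))
                        return true;
        }
        return false;
}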
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2478,9 +2478,23 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
         return BLK_MAX_REQUEST_COUNT;
 }
 
+static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
+                                  unsigned int nr_segs, bool *same_queue_rq)
+{
+        if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
+                if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
+                        return true;
+                if (blk_mq_sched_bio_merge(q, bio, nr_segs))
+                        return true;
+        }
+        return false;
+}
+
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
                                                struct blk_plug *plug,
-                                               struct bio *bio)
+                                               struct bio *bio,
+                                               unsigned int nsegs,
+                                               bool *same_queue_rq)
 {
         struct blk_mq_alloc_data data = {
                 .q = q,
@@ -2489,6 +2503,15 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
         };
         struct request *rq;
 
+        if (unlikely(bio_queue_enter(bio)))
+                return NULL;
+        if (unlikely(!submit_bio_checks(bio)))
+                goto put_exit;
+        if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+                goto put_exit;
+
+        rq_qos_throttle(q, bio);
+
         if (plug) {
                 data.nr_tags = plug->nr_ios;
                 plug->nr_ios = 1;
@@ -2502,25 +2525,34 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
         rq_qos_cleanup(q, bio);
         if (bio->bi_opf & REQ_NOWAIT)
                 bio_wouldblock_error(bio);
+put_exit:
+        blk_queue_exit(q);
         return NULL;
 }
 
 static inline struct request *blk_mq_get_request(struct request_queue *q,
                                                  struct blk_plug *plug,
-                                                 struct bio *bio)
+                                                 struct bio *bio,
+                                                 unsigned int nsegs,
+                                                 bool *same_queue_rq)
 {
         if (plug) {
                 struct request *rq;
 
                 rq = rq_list_peek(&plug->cached_rq);
                 if (rq) {
+                        if (unlikely(!submit_bio_checks(bio)))
+                                return NULL;
+                        if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+                                return NULL;
                         plug->cached_rq = rq_list_next(rq);
                         INIT_LIST_HEAD(&rq->queuelist);
+                        rq_qos_throttle(q, bio);
                         return rq;
                 }
         }
 
-        return blk_mq_get_new_requests(q, plug, bio);
+        return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
 }
 
 /**
@@ -2546,26 +2578,20 @@ void blk_mq_submit_bio(struct bio *bio)
         unsigned int nr_segs = 1;
         blk_status_t ret;
 
+        if (unlikely(!blk_crypto_bio_prep(&bio)))
+                return;
+
         blk_queue_bounce(q, &bio);
         if (blk_may_split(q, bio))
                 __blk_queue_split(q, &bio, &nr_segs);
 
         if (!bio_integrity_prep(bio))
-                goto queue_exit;
-
-        if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
-                if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-                        goto queue_exit;
-                if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-                        goto queue_exit;
-        }
-
-        rq_qos_throttle(q, bio);
+                return;
 
         plug = blk_mq_plug(q, bio);
-        rq = blk_mq_get_request(q, plug, bio);
+        rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
         if (unlikely(!rq))
-                goto queue_exit;
+                return;
 
         trace_block_getrq(bio);
@@ -2646,10 +2672,6 @@ void blk_mq_submit_bio(struct bio *bio)
                 /* Default case. */
                 blk_mq_sched_insert_request(rq, false, true, true);
         }
-
-        return;
-queue_exit:
-        blk_queue_exit(q);
 }
 
 static size_t order_to_size(unsigned int order)
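
Pulling the blk-mq.c hunks above together: on the normal (non-cached) path the queue reference is now taken inside blk_mq_get_new_requests(), before the bio checks, the merge attempt and rq_qos throttling; every early failure drops it at the new put_exit label, while on success it stays with the allocated request as it did before this commit. A condensed sketch follows; the tag allocation in the middle is untouched by the patch and only stubbed out here.

static struct request *blk_mq_get_new_requests(struct request_queue *q,
                                               struct blk_plug *plug,
                                               struct bio *bio,
                                               unsigned int nsegs,
                                               bool *same_queue_rq)
{
        struct request *rq;

        if (unlikely(bio_queue_enter(bio)))     /* queue reference taken here now */
                return NULL;
        if (unlikely(!submit_bio_checks(bio)))  /* checks moved in from __submit_bio() */
                goto put_exit;
        if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
                goto put_exit;

        rq_qos_throttle(q, bio);

        rq = NULL;      /* ... allocate the request as before (unchanged) ... */
        if (rq)
                return rq;      /* success: the reference travels with rq */

        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
put_exit:
        blk_queue_exit(q);      /* every early-exit path drops the reference */
        return NULL;
}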
--- a/block/blk.h
+++ b/block/blk.h
@@ -56,6 +56,7 @@ void blk_freeze_queue(struct request_queue *q);
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_queue_start_drain(struct request_queue *q);
 int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+bool submit_bio_checks(struct bio *bio);
 
 static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
 {