Commit 94aa228c authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: move more logic into blk_mq_insert_requests

Move all logic related to the direct insert (including the call to
blk_mq_run_hw_queue) into blk_mq_insert_requests to streamline the code
flow up a bit, and to allow marking blk_mq_try_issue_list_directly
static.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 90110e04
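
For orientation before the diff, the flow that this patch consolidates into blk_mq_insert_requests() can be modelled as a small, self-contained C sketch: try to issue the list directly while the hardware queue is idle and the caller is synchronous, fall back to the software queue otherwise, and always finish by running the hardware queue. All types and helpers below are illustrative stubs, not the kernel's structures or APIs; only the control flow mirrors the patched function.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stub types standing in for blk_mq_hw_ctx, blk_mq_ctx and a request list. */
struct hw_queue { bool dispatch_busy; };
struct sw_queue { int pending; };
struct req_list { int count; };

static void try_issue_list_directly(struct hw_queue *hctx, struct req_list *list)
{
	(void)hctx;
	list->count = 0;	/* stub: pretend every request was issued */
}

static void run_hw_queue(struct hw_queue *hctx, bool async)
{
	(void)hctx;
	printf("run hw queue (async=%d)\n", async);
}

/*
 * Mirrors the new blk_mq_insert_requests() shape: direct issue when the
 * hw queue is idle and the caller is synchronous, otherwise "splice" the
 * requests onto the sw queue; either way, finish by running the hw queue.
 */
static void insert_requests(struct hw_queue *hctx, struct sw_queue *ctx,
			    struct req_list *list, bool run_queue_async)
{
	if (!hctx->dispatch_busy && !run_queue_async) {
		try_issue_list_directly(hctx, list);
		if (list->count == 0)
			goto out;
	}
	ctx->pending += list->count;	/* fallback: queue on the sw queue */
	list->count = 0;
out:
	run_hw_queue(hctx, run_queue_async);
}

int main(void)
{
	struct hw_queue hctx = { .dispatch_busy = false };
	struct sw_queue ctx = { 0 };
	struct req_list list = { .count = 4 };

	insert_requests(&hctx, &ctx, &list, false);
	return 0;
}
```

With the direct-issue fallback and the final blk_mq_run_hw_queue() call folded into blk_mq_insert_requests(), blk_mq_sched_insert_requests() in the first hunk below is reduced to choosing between the elevator insert and this helper.
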
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -472,23 +472,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 	e = hctx->queue->elevator;
 	if (e) {
 		e->type->ops.insert_requests(hctx, list, false);
+		blk_mq_run_hw_queue(hctx, run_queue_async);
 	} else {
-		/*
-		 * try to issue requests directly if the hw queue isn't
-		 * busy in case of 'none' scheduler, and this way may save
-		 * us one extra enqueue & dequeue to sw queue.
-		 */
-		if (!hctx->dispatch_busy && !run_queue_async) {
-			blk_mq_run_dispatch_ops(hctx->queue,
-				blk_mq_try_issue_list_directly(hctx, list));
-			if (list_empty(list))
-				goto out;
-		}
-		blk_mq_insert_requests(hctx, ctx, list);
+		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
 	}
-
-	blk_mq_run_hw_queue(hctx, run_queue_async);
-out:
 	percpu_ref_put(&q->q_usage_counter);
 }
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,6 +44,9 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+		struct list_head *list);
+
 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 		blk_qc_t qc)
 {
@@ -2495,12 +2498,23 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		struct list_head *list)
+		struct list_head *list, bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
 
+	/*
+	 * Try to issue requests directly if the hw queue isn't busy to save an
+	 * extra enqueue & dequeue to the sw queue.
+	 */
+	if (!hctx->dispatch_busy && !run_queue_async) {
+		blk_mq_run_dispatch_ops(hctx->queue,
+				blk_mq_try_issue_list_directly(hctx, list));
+		if (list_empty(list))
+			goto out;
+	}
+
 	/*
 	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
 	 * offline now
@@ -2514,6 +2528,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	list_splice_tail_init(list, &ctx->rq_lists[type]);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
+out:
+	blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2755,7 +2771,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	} while (!rq_list_empty(plug->mq_list));
 }
 
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -70,9 +70,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		struct list_head *list);
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
-		struct list_head *list);
+		struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings