Commit 05a93117 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: fold blk_mq_sched_insert_requests into blk_mq_dispatch_plug_list

blk_mq_dispatch_plug_list is the only caller of
blk_mq_sched_insert_requests, and it makes sense to just fold it there
as blk_mq_sched_insert_requests isn't specific to I/O schedulers despite
the name.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 94aa228c
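For readers skimming the hunks below: the fold keeps the old helper's behaviour but inlines it at its single call site. The standalone C sketch that follows only models that control flow; every name in it is an illustrative stand-in (it is not kernel code, and the plain integer counter merely marks where percpu_ref_get()/percpu_ref_put() sit in the real function). It mirrors what the blk_mq_dispatch_plug_list hunk does: take a queue usage reference, route the plugged list through the elevator's insert hook when a scheduler is attached (then kick the hardware queue), otherwise insert directly, and finally drop the reference.

```c
#include <stdio.h>

/* All names here are illustrative stand-ins, not the kernel's types or APIs. */
struct mock_queue {
	int usage_counter;                      /* stands in for q->q_usage_counter */
	void (*elevator_insert)(const char *);  /* stands in for e->type->ops.insert_requests */
};

static void plain_insert(const char *list)
{
	printf("no elevator: insert \"%s\" directly\n", list);
}

static void run_hw_queue(int async)
{
	printf("kick hardware queue (async=%d)\n", async);
}

/*
 * Models the folded body of blk_mq_dispatch_plug_list after this commit:
 * hold a usage reference so the queue cannot be released while the plugged
 * list is dispatched, hand the list to the elevator hook if one is attached
 * (then kick the hardware queue), otherwise insert directly, then drop the
 * reference.
 */
static void dispatch_plug_list(struct mock_queue *q, const char *list, int from_sched)
{
	q->usage_counter++;                     /* ~ percpu_ref_get() */
	if (q->elevator_insert) {
		q->elevator_insert(list);
		run_hw_queue(from_sched);
	} else {
		plain_insert(list);
	}
	q->usage_counter--;                     /* ~ percpu_ref_put() */
}

static void demo_elevator_insert(const char *list)
{
	printf("elevator: queue \"%s\" for the scheduler\n", list);
}

int main(void)
{
	struct mock_queue no_sched = { 0, NULL };
	struct mock_queue with_sched = { 0, demo_elevator_insert };

	dispatch_plug_list(&no_sched, "rq1->rq2->rq3", 0);
	dispatch_plug_list(&with_sched, "rq1->rq2->rq3", 0);
	return 0;
}
```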
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -455,30 +455,6 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	blk_mq_run_hw_queue(hctx, async);
 }
 
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async)
-{
-	struct elevator_queue *e;
-	struct request_queue *q = hctx->queue;
-
-	/*
-	 * blk_mq_sched_insert_requests() is called from flush plug
-	 * context only, and hold one usage counter to prevent queue
-	 * from being released.
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-
-	e = hctx->queue->elevator;
-	if (e) {
-		e->type->ops.insert_requests(hctx, list, false);
-		blk_mq_run_hw_queue(hctx, run_queue_async);
-	} else {
-		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
-	}
-
-	percpu_ref_put(&q->q_usage_counter);
-}
-
 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 					  struct blk_mq_hw_ctx *hctx,
 					  unsigned int hctx_idx)
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,9 +18,6 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2497,9 +2497,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 		blk_mq_run_hw_queue(hctx, false);
 }
 
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		struct list_head *list, bool run_queue_async)
+static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, struct list_head *list,
+		bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
@@ -2725,7 +2725,16 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
-	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+
+	percpu_ref_get(&this_hctx->queue->q_usage_counter);
+	if (this_hctx->queue->elevator) {
+		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
+				&list, false);
+		blk_mq_run_hw_queue(this_hctx, from_sched);
+	} else {
+		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
+	}
+	percpu_ref_put(&this_hctx->queue->q_usage_counter);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,8 +69,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			     bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -820,7 +820,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 /*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ * Called from blk_mq_sched_insert_request() or blk_mq_dispatch_plug_list().
  */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 			       struct list_head *list, bool at_head)