Commit 710fa378 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: pass a flags argument to blk_mq_insert_request

Replace the at_head bool with a flags argument that so far only contains
a single BLK_MQ_INSERT_AT_HEAD value.  This makes it much easier to grep
for head insertions into the blk-mq dispatch queues.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-18-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 214a4418
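
At a glance, the change turns an opaque bool at every call site into a grep-able, typed flag. The snippet below is a minimal, self-contained userspace sketch of that pattern, not kernel code: insert_request() and its printf bodies are invented for illustration, and __bitwise/__force are omitted because they only mean something to sparse; only the blk_insert_t and BLK_MQ_INSERT_AT_HEAD names mirror the commit.

        #include <stdio.h>

        /* Typed flag word replacing the old bool; __bitwise is dropped here
         * because it is only checked by sparse, not by the compiler. */
        typedef unsigned int blk_insert_t;
        #define BLK_MQ_INSERT_AT_HEAD   ((blk_insert_t)0x01)

        /* Stand-in for blk_mq_insert_request(): the caller now states its
         * intent with a named flag instead of a bare true/false. */
        static void insert_request(int rq, blk_insert_t flags)
        {
                if (flags & BLK_MQ_INSERT_AT_HEAD)
                        printf("rq %d: insert at head\n", rq);
                else
                        printf("rq %d: insert at tail\n", rq);
        }

        int main(void)
        {
                insert_request(1, BLK_MQ_INSERT_AT_HEAD);   /* was: ...(rq, true)  */
                insert_request(2, 0);                       /* was: ...(rq, false) */
                return 0;
        }
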
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,7 +44,7 @@
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
-static void blk_mq_insert_request(struct request *rq, bool at_head);
+static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                 struct list_head *list);
@@ -1308,7 +1308,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
                 return;
         }
-        blk_mq_insert_request(rq, at_head);
+        blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
         blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
@@ -1371,7 +1371,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
         rq->end_io = blk_end_sync_rq;
         blk_account_io_start(rq);
-        blk_mq_insert_request(rq, at_head);
+        blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
         blk_mq_run_hw_queue(hctx, false);
         if (blk_rq_is_poll(rq)) {
@@ -1451,14 +1451,14 @@ static void blk_mq_requeue_work(struct work_struct *work)
                 } else if (rq->rq_flags & RQF_SOFTBARRIER) {
                         rq->rq_flags &= ~RQF_SOFTBARRIER;
                         list_del_init(&rq->queuelist);
-                        blk_mq_insert_request(rq, true);
+                        blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
                 }
         }
         while (!list_empty(&rq_list)) {
                 rq = list_entry(rq_list.next, struct request, queuelist);
                 list_del_init(&rq->queuelist);
-                blk_mq_insert_request(rq, false);
+                blk_mq_insert_request(rq, 0);
         }
         blk_mq_run_hw_queues(q, false);
@@ -2509,7 +2509,7 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
         blk_mq_run_hw_queue(hctx, run_queue_async);
 }
-static void blk_mq_insert_request(struct request *rq, bool at_head)
+static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 {
         struct request_queue *q = rq->q;
         struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
                  * and it is added to the scheduler queue, there is no chance to
                  * dispatch it given we prioritize requests in hctx->dispatch.
                  */
-                blk_mq_request_bypass_insert(rq, at_head);
+                blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
         } else if (rq->rq_flags & RQF_FLUSH_SEQ) {
                 /*
                  * Firstly normal IO request is inserted to scheduler queue or
@@ -2556,12 +2556,13 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
                 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
                 list_add(&rq->queuelist, &list);
-                q->elevator->type->ops.insert_requests(hctx, &list, at_head);
+                q->elevator->type->ops.insert_requests(hctx, &list,
+                                flags & BLK_MQ_INSERT_AT_HEAD);
         } else {
                 trace_block_rq_insert(rq);
                 spin_lock(&ctx->lock);
-                if (at_head)
+                if (flags & BLK_MQ_INSERT_AT_HEAD)
                         list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
                 else
                         list_add_tail(&rq->queuelist,
@@ -2653,12 +2654,12 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
         blk_status_t ret;
         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-                blk_mq_insert_request(rq, false);
+                blk_mq_insert_request(rq, 0);
                 return;
         }
         if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
-                blk_mq_insert_request(rq, false);
+                blk_mq_insert_request(rq, 0);
                 blk_mq_run_hw_queue(hctx, false);
                 return;
         }
@@ -2683,7 +2684,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-                blk_mq_insert_request(rq, false);
+                blk_mq_insert_request(rq, 0);
                 return BLK_STS_OK;
         }
@@ -3018,7 +3019,7 @@ void blk_mq_submit_bio(struct bio *bio)
         hctx = rq->mq_hctx;
         if ((rq->rq_flags & RQF_ELV) ||
             (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
-                blk_mq_insert_request(rq, false);
+                blk_mq_insert_request(rq, 0);
                 blk_mq_run_hw_queue(hctx, true);
         } else {
                 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,6 +36,9 @@ enum {
         BLK_MQ_TAG_MAX          = BLK_MQ_NO_TAG - 1,
 };
+typedef unsigned int __bitwise blk_insert_t;
+#define BLK_MQ_INSERT_AT_HEAD   ((__force blk_insert_t)0x01)
+
 void blk_mq_submit_bio(struct bio *bio);
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                 unsigned int flags);