Commit bcc330f4 authored by Ming Lei, committed by Jens Axboe

blk-mq: pass request queue to blk_mq_run_dispatch_ops

We have switched to allocating the srcu structure inside the request
queue, so it is fine to pass the request queue to
blk_mq_run_dispatch_ops().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 704b914f
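
For orientation before the diff: a hand-expanded sketch of what a call such as

	blk_mq_run_dispatch_ops(q, ret = __blk_mq_try_issue_directly(hctx, rq, true, last));

becomes with the new queue-based macro below. This is a paraphrase of the macro
body in this patch, not actual preprocessor output:

	if (!blk_queue_has_srcu(q)) {
		/* non-blocking queue: plain RCU read-side critical section */
		rcu_read_lock();
		ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
		rcu_read_unlock();
	} else {
		int srcu_idx;

		/* blocking queue: use the SRCU instance embedded in @q */
		might_sleep();
		srcu_idx = srcu_read_lock(q->srcu);
		ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
		srcu_read_unlock(q->srcu, srcu_idx);
	}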
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1925,7 +1925,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	blk_mq_run_dispatch_ops(hctx, blk_mq_sched_dispatch_requests(hctx));
+	blk_mq_run_dispatch_ops(hctx->queue,
+			blk_mq_sched_dispatch_requests(hctx));
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2047,7 +2048,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	blk_mq_run_dispatch_ops(hctx,
+	blk_mq_run_dispatch_ops(hctx->queue,
 		need_run = !blk_queue_quiesced(hctx->queue) &&
 		blk_mq_hctx_has_pending(hctx));
@@ -2466,7 +2467,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	blk_mq_run_dispatch_ops(hctx,
+	blk_mq_run_dispatch_ops(rq->q,
 		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
 	return ret;
 }
@@ -2780,7 +2781,7 @@ void blk_mq_submit_bio(struct bio *bio)
 			(q->nr_hw_queues == 1 || !is_sync)))
 		blk_mq_sched_insert_request(rq, false, true, true);
 	else
-		blk_mq_run_dispatch_ops(rq->mq_hctx,
+		blk_mq_run_dispatch_ops(rq->q,
 			blk_mq_try_issue_directly(rq->mq_hctx, rq));
 }
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -375,9 +375,9 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 }
 
 /* run the code block in @dispatch_ops with rcu/srcu read lock held */
-#define blk_mq_run_dispatch_ops(hctx, dispatch_ops)		\
+#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
 do {								\
-	if (!((hctx)->flags & BLK_MQ_F_BLOCKING)) {		\
+	if (!blk_queue_has_srcu(q)) {				\
 		rcu_read_lock();				\
 		(dispatch_ops);					\
 		rcu_read_unlock();				\
@@ -385,9 +385,9 @@ do {								\
 		int srcu_idx;					\
 								\
 		might_sleep();					\
-		srcu_idx = srcu_read_lock((hctx)->queue->srcu);	\
+		srcu_idx = srcu_read_lock((q)->srcu);		\
 		(dispatch_ops);					\
-		srcu_read_unlock((hctx)->queue->srcu, srcu_idx); \
+		srcu_read_unlock((q)->srcu, srcu_idx);		\
 	}							\
 } while (0)
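
Note on the interface change: since the srcu structure now lives in struct
request_queue (see parent 704b914f), the macro only needs the queue to pick
between the RCU and SRCU paths, so call sites no longer have to route through a
hardware context. A minimal hypothetical caller, where example_dispatch() and
do_dispatch_work() are made-up stand-ins rather than functions from this patch:

	static void example_dispatch(struct request_queue *q)
	{
		/* runs do_dispatch_work() under rcu or srcu, as @q requires */
		blk_mq_run_dispatch_ops(q, do_dispatch_work(q));
	}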