Commit 2a904d00 authored by Ming Lei, committed by Jens Axboe

blk-mq: remove hctx_lock and hctx_unlock

Remove hctx_lock and hctx_unlock, and add a helper,
blk_mq_run_dispatch_ops(), which runs the code block passed in
dispatch_ops with the rcu/srcu read lock held.

Compared with hctx_lock()/hctx_unlock():

1) two branches are reduced to one, so (hctx->flags & BLK_MQ_F_BLOCKING)
is checked only once per dispatch_ops run

2) srcu_idx need not be touched at all in the non-blocking case

3) might_sleep_if() can be moved into the blocking branch, where it
reduces to a plain might_sleep()

Also put the new blk_mq_run_dispatch_ops() in the private header, so
that the following patch can use it outside of blk-mq.c.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0a467d0f
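Before the diff itself, here is a minimal userspace sketch of the same macro shape, to make the pattern easier to see in isolation. Everything prefixed toy_ is a hypothetical stand-in for the corresponding kernel primitive (rcu_read_lock(), srcu_read_lock(), and so on), not a real API; only the structure mirrors blk_mq_run_dispatch_ops(). It demonstrates points 1) and 2) above: the blocking flag is tested exactly once, and the srcu_idx local exists only in the blocking branch.

#include <stdio.h>

struct toy_hctx {
	unsigned int flags;
};
#define TOY_F_BLOCKING	(1U << 0)

/* stubs standing in for the rcu/srcu read-side primitives */
static void toy_rcu_read_lock(void) { }
static void toy_rcu_read_unlock(void) { }
static int toy_srcu_read_lock(struct toy_hctx *h) { (void)h; return 0; }
static void toy_srcu_read_unlock(struct toy_hctx *h, int idx)
{
	(void)h; (void)idx;
}

/* one flag test per run; srcu_idx is confined to the blocking branch */
#define toy_run_dispatch_ops(hctx, dispatch_ops)		\
do {								\
	if (!((hctx)->flags & TOY_F_BLOCKING)) {		\
		toy_rcu_read_lock();				\
		(dispatch_ops);					\
		toy_rcu_read_unlock();				\
	} else {						\
		int srcu_idx = toy_srcu_read_lock(hctx);	\
								\
		(dispatch_ops);					\
		toy_srcu_read_unlock(hctx, srcu_idx);		\
	}							\
} while (0)

int main(void)
{
	struct toy_hctx hctx = { .flags = 0 };
	int ret = 0;

	/* the argument is a statement; assignments escape to this scope */
	toy_run_dispatch_ops(&hctx, ret = printf("dispatch\n"));
	return ret < 0;
}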
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -1071,26 +1071,6 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-	__releases(hctx->srcu)
-{
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
-		rcu_read_unlock();
-	else
-		srcu_read_unlock(hctx->srcu, srcu_idx);
-}
-
-static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
-	__acquires(hctx->srcu)
-{
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		/* shut up gcc false positive */
-		*srcu_idx = 0;
-		rcu_read_lock();
-	} else
-		*srcu_idx = srcu_read_lock(hctx->srcu);
-}
-
 /**
  * blk_mq_start_request - Start processing a request
  * @rq: Pointer to request to be started
@@ -1947,19 +1927,13 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
  */
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	int srcu_idx;
-
 	/*
 	 * We can't run the queue inline with ints disabled. Ensure that
 	 * we catch bad users of this early.
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
-	hctx_lock(hctx, &srcu_idx);
-	blk_mq_sched_dispatch_requests(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	blk_mq_run_dispatch_ops(hctx, blk_mq_sched_dispatch_requests(hctx));
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2071,7 +2045,6 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
  */
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-	int srcu_idx;
 	bool need_run;
 
 	/*
@@ -2082,10 +2055,9 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	hctx_lock(hctx, &srcu_idx);
-	need_run = !blk_queue_quiesced(hctx->queue) &&
-		blk_mq_hctx_has_pending(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	blk_mq_run_dispatch_ops(hctx,
+		need_run = !blk_queue_quiesced(hctx->queue) &&
+		blk_mq_hctx_has_pending(hctx));
 
 	if (need_run)
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -2488,32 +2460,22 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		struct request *rq)
 {
-	blk_status_t ret;
-	int srcu_idx;
-
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
-	hctx_lock(hctx, &srcu_idx);
-
-	ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
+	blk_status_t ret =
+		__blk_mq_try_issue_directly(hctx, rq, false, true);
+
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
 		blk_mq_request_bypass_insert(rq, false, true);
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
-
-	hctx_unlock(hctx, srcu_idx);
 }
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
 	blk_status_t ret;
-	int srcu_idx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	hctx_lock(hctx, &srcu_idx);
-	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
-	hctx_unlock(hctx, srcu_idx);
-
+	blk_mq_run_dispatch_ops(hctx,
+		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
 	return ret;
 }
@@ -2826,7 +2788,8 @@ void blk_mq_submit_bio(struct bio *bio)
 			(q->nr_hw_queues == 1 || !is_sync)))
 		blk_mq_sched_insert_request(rq, false, true, true);
 	else
-		blk_mq_try_issue_directly(rq->mq_hctx, rq);
+		blk_mq_run_dispatch_ops(rq->mq_hctx,
+				blk_mq_try_issue_directly(rq->mq_hctx, rq));
 }
 
 /**
diff --git a/block/blk-mq.h b/block/blk-mq.h
@@ -374,5 +374,21 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return __blk_mq_active_requests(hctx) < depth;
 }
 
+/* run the code block in @dispatch_ops with rcu/srcu read lock held */
+#define blk_mq_run_dispatch_ops(hctx, dispatch_ops)		\
+do {								\
+	if (!((hctx)->flags & BLK_MQ_F_BLOCKING)) {		\
+		rcu_read_lock();				\
+		(dispatch_ops);					\
+		rcu_read_unlock();				\
+	} else {						\
+		int srcu_idx;					\
+								\
+		might_sleep();					\
+		srcu_idx = srcu_read_lock((hctx)->srcu);	\
+		(dispatch_ops);					\
+		srcu_read_unlock((hctx)->srcu, srcu_idx);	\
+	}							\
+} while (0)
+
 #endif
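One property of the macro worth spelling out: dispatch_ops is expanded as a statement inside the do/while block, so an assignment written in the argument stores directly into a variable in the caller's scope. That is what lets the converted callers capture a result across the locked region, e.g. (simplified from blk_mq_request_issue_directly() in the diff above):

	blk_status_t ret;

	blk_mq_run_dispatch_ops(hctx,
		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
	/* ret was assigned inside the rcu/srcu-protected section */
	return ret;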