Commit 04ced159 authored by Jens Axboe

blk-mq: move hctx lock/unlock into a helper

Move the RCU vs SRCU logic into lock/unlock helpers, which makes
the actual functional bits within the locked region much easier
to read.

tj: Reordered in front of timeout revamp patches and added the missing
    blk_mq_run_hw_queue() conversion.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0d52af59
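
The conversion in the diff below is mechanical: every open-coded RCU/SRCU branch becomes a hctx_lock()/hctx_unlock() pair around the protected region. As a minimal sketch of the resulting caller pattern (example_dispatch is a hypothetical name for illustration; the helpers, might_sleep_if(), and blk_mq_sched_dispatch_requests() are taken from the diff):

static void example_dispatch(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/* Sleeping is only legal when the driver set BLK_MQ_F_BLOCKING. */
	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	/* hctx_lock() picks rcu_read_lock() or srcu_read_lock() for us. */
	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}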
@@ -557,6 +557,22 @@ static void __blk_mq_complete_request(struct request *rq)
 	put_cpu();
 }
 
+static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
+{
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+		rcu_read_unlock();
+	else
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+}
+
+static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+{
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+		rcu_read_lock();
+	else
+		*srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+}
+
 /**
  * blk_mq_complete_request - end I/O on a request
  * @rq: the request being processed
@@ -1214,17 +1230,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		rcu_read_lock();
-		blk_mq_sched_dispatch_requests(hctx);
-		rcu_read_unlock();
-	} else {
-		might_sleep();
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-		blk_mq_sched_dispatch_requests(hctx);
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-	}
+	hctx_lock(hctx, &srcu_idx);
+	blk_mq_sched_dispatch_requests(hctx);
+	hctx_unlock(hctx, srcu_idx);
 }
 
 /*
@@ -1296,17 +1306,10 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		rcu_read_lock();
-		need_run = !blk_queue_quiesced(hctx->queue) &&
-			blk_mq_hctx_has_pending(hctx);
-		rcu_read_unlock();
-	} else {
-		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-		need_run = !blk_queue_quiesced(hctx->queue) &&
-			blk_mq_hctx_has_pending(hctx);
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-	}
+	hctx_lock(hctx, &srcu_idx);
+	need_run = !blk_queue_quiesced(hctx->queue) &&
+		blk_mq_hctx_has_pending(hctx);
+	hctx_unlock(hctx, srcu_idx);
 
 	if (need_run) {
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -1618,7 +1621,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
-						blk_qc_t *cookie, bool may_sleep)
+						blk_qc_t *cookie)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1668,25 +1671,20 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+	blk_mq_sched_insert_request(rq, false, run_queue, false,
+					hctx->flags & BLK_MQ_F_BLOCKING);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, blk_qc_t *cookie)
 {
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		rcu_read_lock();
-		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
-		rcu_read_unlock();
-	} else {
-		unsigned int srcu_idx;
+	int srcu_idx;
 
-		might_sleep();
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-	}
+	hctx_lock(hctx, &srcu_idx);
+	__blk_mq_try_issue_directly(hctx, rq, cookie);
+	hctx_unlock(hctx, srcu_idx);
 }
 
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
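
For context on why the helpers must pick the matching flavor: the quiesce path waits for the corresponding grace period, so a BLK_MQ_F_BLOCKING hctx protected by SRCU needs synchronize_srcu() while the plain-RCU case needs synchronize_rcu(). A sketch of the counterpart, paraphrased from blk-mq of the same era (not part of this commit):

void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	/* One synchronize_rcu() covers every non-blocking hctx. */
	if (rcu)
		synchronize_rcu();
}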