Commit bf4907c0 authored by Jens Axboe

blk-mq: fix schedule-under-preempt for blocking drivers

Commit a4d907b6 unified the single and multi queue request handlers,
but in the process, it also screwed up the locking balance and calls
blk_mq_try_issue_directly() with the ctx preempt lock held. This is a
problem for drivers that have set BLK_MQ_F_BLOCKING, since now they
can't reliably sleep.
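
The shape of the bug, reduced to a minimal sketch: broken_order() and fixed_order() below are hypothetical illustrations, not functions from the kernel source, but blk_mq_get_ctx()/blk_mq_put_ctx() and blk_mq_try_issue_directly() are the real identifiers the patch reorders. blk_mq_get_ctx() picks the per-cpu software queue context via get_cpu(), i.e. with preemption disabled, and blk_mq_put_ctx() is the matching put_cpu().

/*
 * Minimal sketch of the ordering problem (illustration only).
 * Preemption is disabled from blk_mq_get_ctx() until blk_mq_put_ctx(),
 * so a direct issue to a BLK_MQ_F_BLOCKING driver must happen after
 * the ctx has been dropped.
 */
static blk_qc_t broken_order(struct blk_mq_alloc_data *data,
			     struct request *rq, blk_qc_t *cookie)
{
	/* preemption is still disabled here */
	blk_mq_try_issue_directly(data->hctx, rq, cookie); /* ->queue_rq()
							      may sleep! */
	blk_mq_put_ctx(data->ctx);	/* preemption re-enabled too late */
	return *cookie;
}

static blk_qc_t fixed_order(struct blk_mq_alloc_data *data,
			    struct request *rq, blk_qc_t *cookie)
{
	blk_mq_put_ctx(data->ctx);	/* drop the ctx preempt lock first */
	blk_mq_try_issue_directly(data->hctx, rq, cookie); /* now safe to
							      block */
	return *cookie;
}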

While in there, protect against similar issues in the future, by adding
a might_sleep() trigger in the BLOCKING path for direct issue or queue
run.
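
might_sleep() is the stock annotation for this class of bug: with CONFIG_DEBUG_ATOMIC_SLEEP enabled it warns as soon as the annotated path is entered in atomic context, rather than only on the (possibly rare) requests where the driver really blocks. A hypothetical ->queue_rq() shape, not taken from the patch, shows why the tripwire helps; resource_available() and wait_for_resource() are made-up placeholders:

#include <linux/blk-mq.h>
#include <linux/kernel.h>

/*
 * Hypothetical BLK_MQ_F_BLOCKING driver for illustration only.
 * Without the annotation, the schedule-under-preempt bug only fires
 * on requests that take the slow path; with it, every call made in
 * atomic context is flagged.
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	might_sleep();			/* catches atomic callers every time */

	if (resource_available())	/* hypothetical fast path */
		return BLK_MQ_RQ_QUEUE_OK;

	wait_for_resource();		/* hypothetical slow path: sleeps */
	return BLK_MQ_RQ_QUEUE_OK;
}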
Reported-by: Josef Bacik <josef@toxicpanda.com>
Tested-by: Josef Bacik <josef@toxicpanda.com>
Fixes: a4d907b6 ("blk-mq: streamline blk_mq_make_request")
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 47d75207
block/blk-mq.c
@@ -1121,6 +1121,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		blk_mq_sched_dispatch_requests(hctx);
 		rcu_read_unlock();
 	} else {
+		might_sleep();
+
 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
 		blk_mq_sched_dispatch_requests(hctx);
 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
@@ -1495,7 +1497,11 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		__blk_mq_try_issue_directly(rq, cookie, false);
 		rcu_read_unlock();
 	} else {
-		unsigned int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		unsigned int srcu_idx;
+
+		might_sleep();
+
+		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
 		__blk_mq_try_issue_directly(rq, cookie, true);
 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
 	}
@@ -1595,18 +1601,23 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			list_del_init(&same_queue_rq->queuelist);
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 
+		blk_mq_put_ctx(data.ctx);
+
 		if (same_queue_rq)
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
+
+		return cookie;
 	} else if (q->nr_hw_queues > 1 && is_sync) {
+		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+		return cookie;
 	} else if (q->elevator) {
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true, true, true);
-	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio))
 		blk_mq_run_hw_queue(data.hctx, true);
-	}
 
 	blk_mq_put_ctx(data.ctx);
 	return cookie;