Commit e5c0ca13 authored by Chengming Zhou, committed by Jens Axboe

blk-mq: release scheduler resource when request completes

Chuck reported [1] an IO hang problem on NFS exports that reside on
SATA devices and bisected it to commit 615939a2 ("blk-mq: defer to the
normal submission path for post-flush requests").

We analysed the IO hang and found two postflush requests waiting for
each other.

The first postflush request completed the REQ_FSEQ_DATA sequence, moved
on to the REQ_FSEQ_POSTFLUSH sequence and was added to the flush pending
list, but blk_kick_flush() failed because the second postflush request
was still in flight, waiting in the scheduler queue.

The second postflush request, waiting in the scheduler queue, can't be
dispatched because the first postflush request hasn't released its
scheduler resource even though it has already completed.

Fix it by releasing the scheduler resource when the first postflush
request completes, so the second postflush request can be dispatched
and completed, which in turn lets blk_kick_flush() succeed.
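
For illustration only, here is a minimal user-space C model of the guard
this fix relies on: the completion path runs the scheduler's
finish_request hook once and clears the flag, so a later free or second
completion of the same (postflush) request does not finish it again.
The struct, flag and function names below are stand-ins, not the kernel
API.

    #include <stdio.h>

    #define USE_SCHED 0x1              /* stand-in for RQF_USE_SCHED */

    struct req { unsigned int flags; };

    /* stand-in for q->elevator->type->ops.finish_request(rq) */
    static void finish_request(struct req *rq)
    {
            (void)rq;
            printf("scheduler resource released\n");
    }

    /*
     * Models the idea of blk_mq_finish_request(): called at completion
     * time, and clears the flag so a second completion/free is a no-op.
     */
    static void finish_once(struct req *rq)
    {
            if (rq->flags & USE_SCHED) {
                    finish_request(rq);
                    rq->flags &= ~USE_SCHED;
            }
    }

    int main(void)
    {
            struct req rq = { .flags = USE_SCHED };

            finish_once(&rq);  /* first completion releases the resource */
            finish_once(&rq);  /* postflush completes again: nothing to do */
            return 0;
    }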

While at it, remove the check for e->ops.finish_request, as all
schedulers set it. Reaffirm this requirement by adding a WARN_ON_ONCE()
at scheduler registration time, just like we do for insert_requests and
dispatch_request.
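
As a sketch only (the my_* names are hypothetical handlers, not code
from this patch), a scheduler registered via elv_register() is now
expected to provide finish_request alongside insert_requests and
dispatch_request, otherwise registration fails with -EINVAL:

    static struct elevator_type my_sched = {
            .ops = {
                    .insert_requests  = my_insert_requests,
                    .dispatch_request = my_dispatch_request,
                    .finish_request   = my_finish_request,  /* now mandatory */
            },
            .elevator_name = "my-sched",
    };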

[1] https://lore.kernel.org/all/7A57C7AE-A51A-4254-888B-FE15CA21F9E9@oracle.com/

Link: https://lore.kernel.org/linux-block/20230819031206.2744005-1-chengming.zhou@linux.dev/
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202308172100.8ce4b853-oliver.sang@intel.com
Fixes: 615939a2 ("blk-mq: defer to the normal submission path for post-flush requests")
Reported-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Tested-by: Chuck Lever <chuck.lever@oracle.com>
Link: https://lore.kernel.org/r/20230813152325.3017343-1-chengming.zhou@linux.dev
[axboe: folded in incremental fix and added tags]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c984ff14
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -681,6 +681,21 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+static void blk_mq_finish_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		q->elevator->type->ops.finish_request(rq);
+		/*
+		 * For postflush request that may need to be
+		 * completed twice, we should clear this flag
+		 * to avoid double finish_request() on the rq.
+		 */
+		rq->rq_flags &= ~RQF_USE_SCHED;
+	}
+}
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -707,9 +722,7 @@ void blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
-	if ((rq->rq_flags & RQF_USE_SCHED) &&
-	    q->elevator->type->ops.finish_request)
-		q->elevator->type->ops.finish_request(rq);
+	blk_mq_finish_request(rq);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->disk->bdi);
@@ -1020,6 +1033,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 	if (blk_mq_need_time_stamp(rq))
 		__blk_mq_end_request_acct(rq, ktime_get_ns());
 
+	blk_mq_finish_request(rq);
+
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
 		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
@@ -1074,6 +1089,8 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 		if (iob->need_ts)
 			__blk_mq_end_request_acct(rq, now);
 
+		blk_mq_finish_request(rq);
+
 		rq_qos_done(rq->q, rq);
 
 		/*
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -499,6 +499,9 @@ void elv_unregister_queue(struct request_queue *q)
 int elv_register(struct elevator_type *e)
 {
+	/* finish request is mandatory */
+	if (WARN_ON_ONCE(!e->ops.finish_request))
+		return -EINVAL;
 	/* insert_requests and dispatch_request are mandatory */
 	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
 		return -EINVAL;