Commit 7211aef8 authored by Damien Le Moal, committed by Jens Axboe

block: mq-deadline: Fix write completion handling

For a zoned block device using mq-deadline, if a write request for a
zone is received while another write was already dispatched for the same
zone, dd_dispatch_request() will return NULL and the newly inserted
write request is kept in the scheduler queue waiting for the ongoing
zone write to complete. With this behavior, when no other request has
been dispatched, rq_list in blk_mq_sched_dispatch_requests() is empty
and blk_mq_sched_mark_restart_hctx() not called. This in turn leads to
__blk_mq_free_request() call of blk_mq_sched_restart() to not run the
queue when the already dispatched write request completes. The newly
inserted request stays stuck in the scheduler queue until eventually
another request is submitted.

This problem does not affect SCSI disks, as the SCSI stack handles queue
restart on request completion. However, this problem can be triggered
with the nullblk driver with zoned mode enabled.

Fix this by always requesting a queue restart in dd_dispatch_request()
if no request was dispatched while WRITE requests are queued.

Fixes: 5700f691 ("mq-deadline: Introduce zone locking support")
Cc: <stable@vger.kernel.org>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>

Add missing export of blk_mq_sched_restart()
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7e849dd9
...@@ -61,13 +61,14 @@ void blk_mq_sched_assign_ioc(struct request *rq) ...@@ -61,13 +61,14 @@ void blk_mq_sched_assign_ioc(struct request *rq)
* Mark a hardware queue as needing a restart. For shared queues, maintain * Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart. * a count of how many hardware queues are marked for restart.
*/ */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{ {
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return; return;
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
} }
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{ {
......
...@@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, ...@@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
struct request **merged_request); struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head, void blk_mq_sched_insert_request(struct request *rq, bool at_head,
......
...@@ -373,9 +373,16 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) ...@@ -373,9 +373,16 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
/* /*
* One confusing aspect here is that we get called for a specific * One confusing aspect here is that we get called for a specific
* hardware queue, but we return a request that may not be for a * hardware queue, but we may return a request that is for a
* different hardware queue. This is because mq-deadline has shared * different hardware queue. This is because mq-deadline has shared
* state for all hardware queues, in terms of sorting, FIFOs, etc. * state for all hardware queues, in terms of sorting, FIFOs, etc.
*
* For a zoned block device, __dd_dispatch_request() may return NULL
* if all the queued write requests are directed at zones that are already
* locked due to on-going write requests. In this case, make sure to mark
* the queue as needing a restart to ensure that the queue is run again
* and the pending writes dispatched once the target zones for the ongoing
* write requests are unlocked in dd_finish_request().
*/ */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{ {
...@@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) ...@@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
spin_lock(&dd->lock); spin_lock(&dd->lock);
rq = __dd_dispatch_request(dd); rq = __dd_dispatch_request(dd);
if (!rq && blk_queue_is_zoned(hctx->queue) &&
!list_empty(&dd->fifo_list[WRITE]))
blk_mq_sched_mark_restart_hctx(hctx);
spin_unlock(&dd->lock); spin_unlock(&dd->lock);
return rq; return rq;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment