Commit 6d6f167c authored by Jianchao Wang, committed by Jens Axboe

blk-mq: put the driver tag of nxt rq before first one is requeued

When freeing the driver tag of the next rq with an I/O scheduler
configured, we take the first entry of the list. By that point,
however, the failed rq has already been requeued to the head of the
list, so we end up freeing the driver tag of the wrong request. Fix
this by freeing the driver tag of the next rq before the failed one
is requeued in the failure branch of the queue_rq callback.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e8401073
@@ -1094,7 +1094,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		bool got_budget)
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
+	struct request *rq, *nxt;
 	int errors, queued;
 
 	if (list_empty(list))
@@ -1151,14 +1151,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		if (list_empty(list))
 			bd.last = true;
 		else {
-			struct request *nxt;
-
 			nxt = list_first_entry(list, struct request, queuelist);
 			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
 		if (ret == BLK_STS_RESOURCE) {
+			/*
+			 * If an I/O scheduler has been configured and we got a
+			 * driver tag for the next request already, free it again.
+			 */
+			if (!list_empty(list)) {
+				nxt = list_first_entry(list, struct request, queuelist);
+				blk_mq_put_driver_tag(nxt);
+			}
 			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
@@ -1181,13 +1187,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	 * that is where we will continue on next queue run.
 	 */
 	if (!list_empty(list)) {
-		/*
-		 * If an I/O scheduler has been configured and we got a driver
-		 * tag for the next request already, free it again.
-		 */
-		rq = list_first_entry(list, struct request, queuelist);
-		blk_mq_put_driver_tag(rq);
-
 		spin_lock(&hctx->lock);
 		list_splice_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
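
Note (not part of the commit): below is a minimal user-space C sketch of the ordering problem the patch fixes. The struct request here is a toy stand-in with only the fields the example needs, and put_driver_tag() / requeue_request() are hypothetical helpers modelling, not calling, the blk-mq functions.

#include <stdio.h>

/* Toy stand-in for struct request: just a tag flag and a list link. */
struct request {
	const char *name;
	int has_driver_tag;
	struct request *next;
};

/* Hypothetical helper modelling blk_mq_put_driver_tag(): drop rq's tag. */
static void put_driver_tag(struct request *rq)
{
	rq->has_driver_tag = 0;
}

/* Hypothetical helper modelling requeue: push rq back to the list head. */
static struct request *requeue_request(struct request *rq, struct request *head)
{
	rq->next = head;
	return rq;
}

int main(void)
{
	struct request nxt = { "nxt", 1, NULL };	/* next rq, driver tag held    */
	struct request rq  = { "rq",  0, NULL };	/* failed rq, tag already gone */
	struct request *list = &nxt;			/* dispatch list: [nxt]        */

	/* Old order: requeue the failed rq first, so the list becomes [rq, nxt]... */
	list = requeue_request(&rq, list);

	/* ...then free "the next rq's" tag via the list head: wrong request. */
	put_driver_tag(list);

	printf("tag dropped from '%s'; nxt still holds its tag: %d\n",
	       list->name, nxt.has_driver_tag);

	/* The commit flips the order: put nxt's tag, then requeue rq. */
	return 0;
}

Running the sketch prints that the tag was dropped from 'rq' while nxt still holds its tag, which is the leak/mismatch the reordered code avoids.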