Commit 5a26e45e authored by Chengming Zhou, committed by Jens Axboe

null_blk: fix poll request timeout handling

When running an io_uring benchmark on /dev/nullb0, it's easy to crash the
kernel if poll request timeouts are triggered, as reported by David. [1]

BUG: kernel NULL pointer dereference, address: 0000000000000008
Workqueue: kblockd blk_mq_timeout_work
RIP: 0010:null_timeout_rq+0x4e/0x91
Call Trace:
 ? null_timeout_rq+0x4e/0x91
 blk_mq_handle_expired+0x31/0x4b
 bt_iter+0x68/0x84
 ? bt_tags_iter+0x81/0x81
 __sbitmap_for_each_set.constprop.0+0xb0/0xf2
 ? __blk_mq_complete_request_remote+0xf/0xf
 bt_for_each+0x46/0x64
 ? __blk_mq_complete_request_remote+0xf/0xf
 ? percpu_ref_get_many+0xc/0x2a
 blk_mq_queue_tag_busy_iter+0x14d/0x18e
 blk_mq_timeout_work+0x95/0x127
 process_one_work+0x185/0x263
 worker_thread+0x1b5/0x227

This is a race between null_timeout_rq() and null_poll():

null_poll()				null_timeout_rq()
  spin_lock(&nq->poll_lock)
  list_splice_init(&nq->poll_list, &list)
  spin_unlock(&nq->poll_lock)

  while (!list_empty(&list))
    req = list_first_entry()
    list_del_init()
    ...
    blk_mq_add_to_batch()
    // req->rq_next = NULL
					spin_lock(&nq->poll_lock)

					// rq->queuelist.next == NULL
					list_del_init(&rq->queuelist)

					spin_unlock(&nq->poll_lock)
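
The faulting address in the oops (0x8) follows from the layout of struct
request: queuelist and rq_next share a union, so blk_mq_add_to_batch()
writing req->rq_next = NULL also makes req->queuelist.next NULL. A minimal
userspace sketch of that aliasing (struct fake_request and its fields are
illustrative only, not the real kernel structure):

/*
 * Userspace sketch (not kernel code) of why the timeout path faults
 * at address 0x8: the two union members alias the same memory, so
 * batching a request NULLs queuelist.next behind null_timeout_rq()'s back.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

struct fake_request {
	union {
		struct list_head queuelist;   /* while on nq->poll_list */
		struct fake_request *rq_next; /* while in an io_comp_batch */
	};
};

int main(void)
{
	/* What blk_mq_add_to_batch() effectively does to the request. */
	struct fake_request rq = { .rq_next = NULL };

	/* The same memory seen through the other union member: */
	printf("queuelist.next = %p\n", (void *)rq.queuelist.next);

	/*
	 * list_del_init(&rq->queuelist) reads next (== NULL) and then
	 * writes next->prev, i.e. offset 8 from NULL on 64-bit -- the
	 * faulting address 0000000000000008 in the oops above.
	 */
	printf("offsetof(prev) = %zu\n", offsetof(struct list_head, prev));
	return 0;
}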

Fix this by setting the request state to MQ_RQ_COMPLETE under nq->poll_lock
protection, so that null_timeout_rq() can safely detect the race and
return early.

Note this patch only fixes the kernel panic when a request timeout happens.

[1] https://lore.kernel.org/all/3893581.1691785261@warthog.procyon.org.uk/

Fixes: 0a593fbb ("null_blk: poll queue support")
Reported-by: David Howells <dhowells@redhat.com>
Tested-by: David Howells <dhowells@redhat.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Link: https://lore.kernel.org/r/20230901120306.170520-2-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f7cf2242
drivers/block/null_blk/main.c
@@ -1643,9 +1643,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	struct nullb_queue *nq = hctx->driver_data;
 	LIST_HEAD(list);
 	int nr = 0;
+	struct request *rq;
 
 	spin_lock(&nq->poll_lock);
 	list_splice_init(&nq->poll_list, &list);
+	list_for_each_entry(rq, &list, queuelist)
+		blk_mq_set_request_complete(rq);
 	spin_unlock(&nq->poll_lock);
 
 	while (!list_empty(&list)) {
@@ -1671,16 +1674,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	pr_info("rq %p timed out\n", rq);
-
 	if (hctx->type == HCTX_TYPE_POLL) {
 		struct nullb_queue *nq = hctx->driver_data;
 
 		spin_lock(&nq->poll_lock);
+		/* The request may have completed meanwhile. */
+		if (blk_mq_request_completed(rq)) {
+			spin_unlock(&nq->poll_lock);
+			return BLK_EH_DONE;
+		}
 		list_del_init(&rq->queuelist);
 		spin_unlock(&nq->poll_lock);
 	}
 
+	pr_info("rq %p timed out\n", rq);
+
 	/*
 	 * If the device is marked as blocking (i.e. memory backed or zoned
 	 * device), the submission path may be blocked waiting for resources
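
The patch leans on two small request-state helpers from
include/linux/blk-mq.h, paraphrased below (the exact definitions may
differ slightly between kernel versions):

static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

Because both paths take nq->poll_lock, null_timeout_rq() now observes one
of two consistent states: either the request is still on nq->poll_list and
can be safely unlinked, or null_poll() has already spliced it away and
marked it MQ_RQ_COMPLETE, in which case the timeout handler returns
BLK_EH_DONE without touching queuelist.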