Commit aa6df8dd authored by Mike Snitzer

dm: fix free_rq_clone() NULL pointer when requeueing unmapped request

Commit 02233342 ("dm: optimize dm_mq_queue_rq to _not_ use kthread if
using pure blk-mq") mistakenly removed free_rq_clone()'s clone->q check
before testing clone->q->mq_ops.  It was an oversight to discontinue
that check for 1 of the 2 use-cases for free_rq_clone():
1) free_rq_clone() called when an unmapped original request is requeued
2) free_rq_clone() called in the request-based IO completion path

The clone->q check made sense for case #1 but not for #2.  However, we
cannot just reinstate the check as it'd mask a serious bug in the IO
completion case #2 -- no in-flight request should have an uninitialized
request_queue (basic block layer refcounting _should_ ensure this).

The NULL pointer seen for case #1 is detailed here:
https://www.redhat.com/archives/dm-devel/2015-April/msg00160.html
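
To make the failure mode concrete, here is a minimal userspace model of the case #1 crash. The struct names only echo the kernel's; this is a simplified, hypothetical sketch, not the driver code itself (the real code is in the diff below):

#include <stdio.h>

struct request_queue {
	const void *mq_ops;	/* stand-in for the real ops table */
};

struct request {
	struct request_queue *q;
};

/* The pre-fix check, reduced to its essence. */
static void buggy_free_rq_clone(struct request *clone)
{
	if (clone->q->mq_ops)	/* crashes: clone->q is NULL */
		puts("release via target's release_clone_rq()");
}

int main(void)
{
	/* An unmapped clone: prepped, then requeued before a queue
	 * was ever assigned to it. */
	struct request clone = { .q = NULL };

	buggy_free_rq_clone(&clone);	/* NULL pointer dereference */
	return 0;
}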

Fix this free_rq_clone() NULL pointer by simply checking if the
mapped_device's type is DM_TYPE_MQ_REQUEST_BASED (clone's queue is
blk-mq) rather than checking clone->q->mq_ops.  This avoids the need to
dereference clone->q, but a WARN_ON_ONCE is added to let us know if an
uninitialized clone request is being completed.
Reported-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 3e6180f0
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
+	WARN_ON_ONCE(must_be_mapped && !clone->q);
+
 	blk_rq_unprep_clone(clone);
 
-	if (clone->q->mq_ops)
+	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
+	/*
+	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+	 * no need to call free_clone_request() because we leverage blk-mq by
+	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+	 */
 
 	if (!md->queue->mq_ops)
 		free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
 			rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone);
+	free_rq_clone(clone, true);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone);
+		free_rq_clone(clone, false);
 }
 
 /*
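
And a matching userspace model of the post-fix shape: branch on the mapped_device's type instead of dereferencing clone->q, and warn when a request being completed has no queue. Again a simplified, hypothetical sketch of the pattern, not the kernel code:

#include <stdbool.h>
#include <stdio.h>

enum dm_queue_type { DM_TYPE_REQUEST_BASED, DM_TYPE_MQ_REQUEST_BASED };

struct request_queue { const void *mq_ops; };
struct request { struct request_queue *q; };
struct mapped_device { enum dm_queue_type type; };

static void fixed_free_rq_clone(struct mapped_device *md,
				struct request *clone, bool must_be_mapped)
{
	/* Completion path only: an in-flight clone must have a queue. */
	if (must_be_mapped && !clone->q)
		fprintf(stderr, "WARN: completing a clone with no queue\n");

	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		puts("stacked on blk-mq: release_clone_rq()");
	else
		puts("request_fn stacking: free_clone_request()");
}

int main(void)
{
	struct mapped_device md = { .type = DM_TYPE_MQ_REQUEST_BASED };
	struct request unmapped_clone = { .q = NULL };

	/* Case #1 (requeue of an unmapped request): safe, no NULL deref. */
	fixed_free_rq_clone(&md, &unmapped_clone, false);
	return 0;
}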