Commit 683d0e12 authored by David Hildenbrand, committed by Jens Axboe

blk-mq: Avoid race condition with uninitialized requests

This patch should fix the bug reported in
https://lkml.org/lkml/2014/9/11/249.

We have to initialize at least the atomic_flags and the cmd_flags when
allocating storage for the requests.

Otherwise blk_mq_timeout_check() might dereference uninitialized
pointers when racing with the creation of a request.

Also move the reset of cmd_flags from the initialization code to the
point where a request is freed, so we never end up with pending flush
request indicators that might trigger dereferences of invalid pointers
in blk_mq_timeout_check().

Cc: stable@vger.kernel.org
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Reported-by: Paulo De Rezende Pinatti <ppinatti@linux.vnet.ibm.com>
Tested-by: Paulo De Rezende Pinatti <ppinatti@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 538b7534
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 	if (tag != BLK_MQ_TAG_FAIL) {
 		rq = data->hctx->tags->rqs[tag];
 
-		rq->cmd_flags = 0;
 		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
@@ -258,6 +257,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
+	rq->cmd_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -1410,6 +1410,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
+			tags->rqs[i]->atomic_flags = 0;
+			tags->rqs[i]->cmd_flags = 0;
 			if (set->ops->init_request) {
 				if (set->ops->init_request(set->driver_data,
 						tags->rqs[i], hctx_idx, i,
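
To make the race concrete, here is a minimal, self-contained user-space sketch. The struct, flag values, and the check_slot() walker below are illustrative stand-ins, not the real blk-mq definitions: a timeout-style checker walks every preallocated request slot and trusts atomic_flags and cmd_flags, so a slot whose flags still hold allocation garbage can send it dereferencing a bogus pointer. Zeroing both fields when the request storage is set up, as the last hunk does, keeps the walker from ever acting on such a slot.

/*
 * Illustrative sketch only: simplified stand-ins for the blk-mq types,
 * not the actual kernel definitions.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_ATOM_STARTED  0x1UL   /* stand-in for REQ_ATOM_STARTED */
#define FAKE_FLUSH_SEQ     0x2UL   /* stand-in for a flush indicator in cmd_flags */

struct fake_request {
	unsigned long atomic_flags;
	unsigned long cmd_flags;
	void (*flush_done)(void *);  /* only valid for real, started flush requests */
	void *flush_data;
};

/*
 * What a timeout-style walker effectively does: inspect the flags of
 * every slot and chase request-specific pointers if the flags claim the
 * request is live.  It has no other way to tell a real request from a
 * slot that was merely allocated.
 */
static void check_slot(struct fake_request *rq)
{
	if (!(rq->atomic_flags & FAKE_ATOM_STARTED))
		return;                         /* slot looks idle, skip it */

	if (rq->cmd_flags & FAKE_FLUSH_SEQ)
		rq->flush_done(rq->flush_data); /* crashes if these are garbage */
}

int main(void)
{
	/* Raw storage for a request, flags not yet initialized. */
	struct fake_request *slot = malloc(sizeof(*slot));
	if (!slot)
		return 1;

	/*
	 * The fix from the last hunk, applied at allocation time: with both
	 * flag words zeroed, check_slot() bails out before touching any
	 * pointer fields, even if it runs before the request is set up.
	 */
	slot->atomic_flags = 0;
	slot->cmd_flags = 0;

	check_slot(slot);   /* safely skipped */
	free(slot);
	return 0;
}

The same reasoning explains why cmd_flags is now cleared at free time rather than at allocation: a flush indicator left over from a completed request would otherwise make the walker take the pointer-chasing branch for the next, not-yet-initialized user of the slot.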