Commit 6be6d112 authored by Chengming Zhou, committed by Jens Axboe

blk-mq: fix tags UAF when shrinking q->nr_hw_queues

When nr_hw_queues shrinks, we free the excess tags before reallocating the
hw_ctxs for each queue. During that resize, we may still need to access
those tags; for example, blk_mq_tag_idle(hctx) accesses the queue's shared
tags.

This can cause a slab use-after-free, as reported by KASAN. Fix it by
moving the release of the excess tags to the end.
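
To make the ordering concrete, here is a minimal userspace sketch of the
pattern, not the kernel code itself (names such as resize_hw_queues, tagsv,
prev_nr and new_nr are illustrative stand-ins): freeing the excess per-queue
tags before the resize step would leave the resize dereferencing freed
memory, while freeing them after the resize, as this patch does, is safe.

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_QUEUES 8

	struct tags { int depth; };

	static struct tags *tagsv[MAX_QUEUES];

	/*
	 * Stand-in for the hctx resize step: while resizing it may still
	 * dereference the tags of every pre-existing queue (the way
	 * blk_mq_tag_idle() touches the queue's shared tags).
	 */
	static void resize_hw_queues(int nr)
	{
		for (int i = 0; i < nr; i++)
			printf("queue %d: depth %d\n", i, tagsv[i]->depth);
	}

	int main(void)
	{
		int prev_nr = 8, new_nr = 4;

		for (int i = 0; i < prev_nr; i++) {
			tagsv[i] = malloc(sizeof(*tagsv[i]));
			tagsv[i]->depth = 64;
		}

		/*
		 * Buggy order: freeing tagsv[new_nr..prev_nr-1] here,
		 * before resize_hw_queues() runs, is the use-after-free
		 * that KASAN reported.
		 */

		resize_hw_queues(prev_nr);	/* resize first ... */

		/* ... and only then free the excess entries. */
		for (int i = new_nr; i < prev_nr; i++) {
			free(tagsv[i]);
			tagsv[i] = NULL;
		}
		return 0;
	}
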

Fixes: e1dd7bc9 ("blk-mq: fix tags leak when shrink nr_hw_queues")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Closes: https://lore.kernel.org/all/CAHj4cs_CK63uoDpGBGZ6DN4OCTpzkR3UaVgK=LX8Owr8ej2ieQ@mail.gmail.com/
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20230908005702.2183908-1-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0bb80ecc
@@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 	struct blk_mq_tags **new_tags;
 	int i;
 
-	if (set->nr_hw_queues >= new_nr_hw_queues) {
-		for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
-			__blk_mq_free_map_and_rqs(set, i);
+	if (set->nr_hw_queues >= new_nr_hw_queues)
 		goto done;
-	}
 
 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
 			GFP_KERNEL, set->numa_node);
@@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
 	struct request_queue *q;
 	LIST_HEAD(head);
-	int prev_nr_hw_queues;
+	int prev_nr_hw_queues = set->nr_hw_queues;
+	int i;
 
 	lockdep_assert_held(&set->tag_list_lock);
 
@@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	prev_nr_hw_queues = set->nr_hw_queues;
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto reregister;
 
@@ -4781,6 +4778,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+
+	/* Free the excess tags when nr_hw_queues shrink. */
+	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+		__blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)