Commit e09aae7e authored by Ming Lei, committed by Jens Axboe

blk-mq: release mq's kobjects in blk_release_queue()

The kobject memory embedded in blk-mq's hctx/ctx structures must not be
freed before the kobject itself is released, because the driver core may
access it at any time up until the release handler runs.

We can't simply free that memory from the ctx/hctx/mq_kobj release
handlers, because those handlers can run before blk_cleanup_queue(),
which still accesses these structures.

Given that mq_kobj should never have been introduced in the first place,
this patch simply moves mq's release work into blk_release_queue().
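
For illustration (not part of the original commit), here is a minimal
sketch of the embedded-kobject lifetime rule this fix enforces. The
demo_* names are hypothetical stand-ins for blk-mq's hctx/ctx; only the
kobject API calls are real kernel interfaces:

	#include <linux/kobject.h>
	#include <linux/slab.h>

	/* Hypothetical container type; stands in for blk-mq's hctx/ctx. */
	struct demo_ctx {
		int data;
		struct kobject kobj;	/* embedded: pins the whole allocation */
	};

	/* Runs only after the last kobject_put() drops the final reference. */
	static void demo_ctx_release(struct kobject *kobj)
	{
		kfree(container_of(kobj, struct demo_ctx, kobj));
	}

	static struct kobj_type demo_ktype = {
		.release = demo_ctx_release,
	};

	static void demo_teardown(struct demo_ctx *ctx)
	{
		kobject_del(&ctx->kobj);	/* unpublish from sysfs */
		kobject_put(&ctx->kobj);	/* drop our reference */
		/*
		 * Calling kfree(ctx) here would be the bug this commit fixes:
		 * an open sysfs file can still hold a reference and touch
		 * ctx->kobj after this function returns, so freeing must wait
		 * for demo_ctx_release().
		 */
	}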
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 74170118
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
+	queue_for_each_hw_ctx(q, hctx, i)
 		free_cpumask_var(hctx->cpumask);
-		kfree(hctx);
-	}
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -1869,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * It is the actual release handler for mq, but we do it from
+ * request queue's release handler for avoiding use-after-free
+ * and headache because q->mq_kobj shouldn't have been introduced,
+ * but we can't group ctx/kctx kobj without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	/* hctx kobj stays in hctx */
+	queue_for_each_hw_ctx(q, hctx, i)
+		kfree(hctx);
+
+	kfree(q->queue_hw_ctx);
+
+	/* ctx kobj stays in queue_ctx */
+	free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct blk_mq_hw_ctx **hctxs;
@@ -2002,12 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->mq_usage_counter);
 
-	free_percpu(q->queue_ctx);
-	kfree(q->queue_hw_ctx);
 	kfree(q->mq_map);
 
-	q->queue_ctx = NULL;
-	q->queue_hw_ctx = NULL;
 	q->mq_map = NULL;
 
 	mutex_lock(&all_q_mutex);
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (!q->mq_ops)
 		blk_free_flush_queue(q->fq);
+	else
+		blk_mq_release(q);
 
 	blk_trace_shutdown(q);
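
With the patch applied, the teardown ordering looks roughly like this (a
simplified sketch of the call chain in kernels of this era; not verbatim
kernel code, and details vary by version):

	/*
	 * blk_cleanup_queue(q)
	 *   blk_mq_free_queue(q)        - may run while sysfs files are
	 *                                 still open; no longer frees
	 *                                 hctx/ctx memory
	 *   blk_put_queue(q)            - drops a queue reference
	 *
	 * ... last reference dropped, possibly much later ...
	 *
	 * kobject_put(&q->kobj)
	 *   blk_release_queue()         - the queue kobject's release handler
	 *     blk_mq_release(q)         - now safe: nothing can reach the
	 *                                 hctx/ctx kobjects anymore
	 */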