Commit 5a9d041b authored by Jens Axboe

block: move io_context creation into where it's needed

The only user of the io_context for IO is BFQ, yet we put the checking
and logic for it into the normal IO path.

Put the creation into blk_mq_sched_assign_ioc(), and have BFQ use that
helper.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 48b5c1fb
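
For quick reference, a minimal sketch of the end state this commit produces, assembled from the hunks below. The icq lookup in the middle of the helper is elided and is not touched by this change, and the local `struct request_queue *q = rq->q;` is an assumption inferred from the hunk's use of `q->node`.

/*
 * Sketch (not the full functions): io_context creation now lives in
 * blk_mq_sched_assign_ioc(), and BFQ calls the helper from its
 * prepare_request hook instead of relying on submit_bio_checks().
 */
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;	/* assumed; hunk only shows q->node */
	struct io_context *ioc;
	struct io_cq *icq;

	/* create task io_context, if we don't have one already */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	/* ... icq lookup/creation (sets ioc and icq) unchanged by this commit ... */

	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}
EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);

static void bfq_prepare_request(struct request *rq)
{
	blk_mq_sched_assign_ioc(rq);

	/* ... rest of BFQ's prepare_request unchanged ... */
}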
block/bfq-iosched.c
@@ -6573,6 +6573,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  */
 static void bfq_prepare_request(struct request *rq)
 {
+	blk_mq_sched_assign_ioc(rq);
+
 	/*
 	 * Regardless of whether we have an icq attached, we have to
 	 * clear the scheduler pointers, as they might point to
block/blk-core.c
@@ -750,15 +750,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		break;
 	}
 
-	/*
-	 * Various block parts want %current->io_context, so allocate it up
-	 * front rather than dealing with lots of pain to allocate it only
-	 * where needed. This may fail and the block layer knows how to live
-	 * with it.
-	 */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
-
 	if (blk_throtl_bio(bio))
 		return false;
block/blk-mq-sched.c
@@ -24,6 +24,10 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	struct io_context *ioc;
 	struct io_cq *icq;
 
+	/* create task io_context, if we don't have one already */
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
 	/*
 	 * May not have an IO context if it's a passthrough request
 	 */
@@ -43,6 +47,7 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_assign_ioc);
 
 /*
  * Mark a hardware queue as needing a restart. For shared queues, maintain
block/blk-mq.c
@@ -406,9 +406,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	if (!op_is_flush(data->cmd_flags) &&
 	    e->type->ops.prepare_request) {
-		if (e->type->icq_cache)
-			blk_mq_sched_assign_ioc(rq);
-
 		e->type->ops.prepare_request(rq);
 		rq->rq_flags |= RQF_ELVPRIV;
 	}