Commit 781dd830 authored by Jens Axboe

block: move RQF_ELV setting into allocators

It's not safe to do this before blk_queue_enter(), as the scheduler state
could have changed in between. Hence move the RQF_ELV setting into the
allocators, where we know the queue is already entered.
Suggested-by: Ming Lei <ming.lei@redhat.com>
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Reported-by: Steffen Maier <maier@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a1c2f7e7
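The reasoning in the commit message can be sketched outside the kernel: with the old ordering, q->elevator was sampled to set RQF_ELV before blk_queue_enter(), so an elevator switch in that window could leave the flag stale; the fix defers the read until the queue has been entered. Below is a minimal, user-space illustration with stand-in types and stub helpers (not the real blk-mq API; names like alloc_old_ordering are invented for the sketch):

/* Illustrative sketch only, not kernel code: stand-in types and stub
 * helpers model the ordering problem described in the commit message. */
#include <stdio.h>

#define RQF_ELV (1u << 0)                 /* stand-in flag value */

struct request_queue { void *elevator; };
struct alloc_data    { unsigned int rq_flags; };

/* Stubs standing in for the real queue enter/exit helpers. */
static void blk_queue_enter_stub(struct request_queue *q) { (void)q; }
static void blk_queue_exit_stub(struct request_queue *q)  { (void)q; }

/* Old ordering: q->elevator is sampled before the queue is entered, so an
 * elevator switch in that window leaves rq_flags inconsistent with the
 * scheduler actually in place once the queue reference is held. */
static void alloc_old_ordering(struct request_queue *q, struct alloc_data *d)
{
	d->rq_flags = q->elevator ? RQF_ELV : 0;  /* racy read */
	blk_queue_enter_stub(q);                  /* scheduler may have changed */
	/* ... tag allocation would use the possibly stale rq_flags ... */
	blk_queue_exit_stub(q);
}

/* New ordering: the flag is only set after the queue has been entered,
 * mirroring what the allocators now do internally. */
static void alloc_new_ordering(struct request_queue *q, struct alloc_data *d)
{
	blk_queue_enter_stub(q);
	if (q->elevator)                          /* stable while entered */
		d->rq_flags |= RQF_ELV;
	/* ... tag allocation sees a consistent view ... */
	blk_queue_exit_stub(q);
}

int main(void)
{
	struct request_queue q = { .elevator = NULL };
	struct alloc_data before = { 0 }, after = { 0 };

	alloc_old_ordering(&q, &before);
	alloc_new_ordering(&q, &after);
	printf("old rq_flags=%u new rq_flags=%u\n", before.rq_flags, after.rq_flags);
	return 0;
}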
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -419,7 +419,6 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
-	struct elevator_queue *e = q->elevator;
 	u64 alloc_time_ns = 0;
 	struct request *rq;
 	unsigned int tag;
@@ -431,7 +430,11 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
-	if (e) {
+	if (q->elevator) {
+		struct elevator_queue *e = q->elevator;
+
+		data->rq_flags |= RQF_ELV;
+
 		/*
 		 * Flush/passthrough requests are special and go directly to the
 		 * dispatch list. Don't include reserved tags in the
@@ -447,7 +450,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 retry:
 	data->ctx = blk_mq_get_ctx(q);
 	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-	if (!e)
+	if (!(data->rq_flags & RQF_ELV))
 		blk_mq_tag_busy(data->hctx);
 
 	/*
@@ -490,7 +493,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		.q		= q,
 		.flags		= flags,
 		.cmd_flags	= op,
-		.rq_flags	= q->elevator ? RQF_ELV : 0,
 		.nr_tags	= 1,
 	};
 	struct request *rq;
@@ -520,7 +522,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.q		= q,
 		.flags		= flags,
 		.cmd_flags	= op,
-		.rq_flags	= q->elevator ? RQF_ELV : 0,
 		.nr_tags	= 1,
 	};
 	u64 alloc_time_ns = 0;
@@ -561,6 +562,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 
 	if (!q->elevator)
 		blk_mq_tag_busy(data.hctx);
+	else
+		data.rq_flags |= RQF_ELV;
 
 	ret = -EWOULDBLOCK;
 	tag = blk_mq_get_tag(&data);
@@ -2515,7 +2518,6 @@ void blk_mq_submit_bio(struct bio *bio)
 		.q		= q,
 		.nr_tags	= 1,
 		.cmd_flags	= bio->bi_opf,
-		.rq_flags	= q->elevator ? RQF_ELV : 0,
 	};
 
 	if (plug) {