Commit b82d4b19 authored by Tejun Heo, committed by Jens Axboe

blkcg: make request_queue bypassing on allocation

With the previous change to guarantee bypass visibility for RCU read
lock regions, entering bypass mode involves non-trivial overhead, and
future changes are scheduled to make use of bypass mode during the
init path.  Combined, these may end up adding noticeable delay during
boot.

This patch makes request_queue start its life in bypass mode, which is
ended on queue init completion at the end of
blk_init_allocated_queue(), and updates blk_queue_bypass_start() such
that draining and RCU synchronization are performed only when the
queue actually enters bypass mode.

This avoids unnecessarily switching in and out of bypass mode during
init, sparing both the overhead and any nasty surprises that may stem
from leaving bypass mode on half-initialized queues.

The boot time overhead was pointed out by Vivek.
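
The counterpart that ends bypass mode is not part of the diff below; for
context, here is a sketch of what blk_queue_bypass_end() looks like in the
block/blk-core.c of this era (paraphrased, so exact details may differ).
The key point is that bypass_depth is a nesting counter: QUEUE_FLAG_BYPASS
is only cleared when the depth drops back to zero, which is what lets the
initial bypass set at allocation stack safely with later
blk_queue_bypass_start() calls.

```c
/*
 * Sketch of blk_queue_bypass_end(), paraphrased from block/blk-core.c
 * of this era; exact details may differ.  bypass_depth nests: only the
 * final bypass_end() clears the flag.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
```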
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 80fd9979
@@ -421,14 +421,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+	bool drain;
+
 	spin_lock_irq(q->queue_lock);
-	q->bypass_depth++;
+	drain = !q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	blk_drain_queue(q, false);
-	/* ensure blk_queue_bypass() is %true inside RCU read lock */
-	synchronize_rcu();
+	if (drain) {
+		blk_drain_queue(q, false);
+		/* ensure blk_queue_bypass() is %true inside RCU read lock */
+		synchronize_rcu();
+	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
@@ -577,6 +581,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	 */
 	q->queue_lock = &q->__queue_lock;
 
+	/*
+	 * A queue starts its life with bypass turned on to avoid
+	 * unnecessary bypass on/off overhead and nasty surprises during
+	 * init.  The initial bypass will be finished at the end of
+	 * blk_init_allocated_queue().
+	 */
+	q->bypass_depth = 1;
+	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
 	if (blkcg_init_queue(q))
 		goto fail_id;
@@ -672,15 +685,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 
 	q->sg_reserved_size = INT_MAX;
 
-	/*
-	 * all done
-	 */
-	if (!elevator_init(q, NULL)) {
-		blk_queue_congestion_threshold(q);
-		return q;
-	}
+	/* init elevator */
+	if (elevator_init(q, NULL))
+		return NULL;
+
+	blk_queue_congestion_threshold(q);
+
+	/* all done, end the initial bypass */
+	blk_queue_bypass_end(q);
 
-	return NULL;
+	return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
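
Taken together, the hunks give a queue the following bypass lifecycle.
The sequence below is a hypothetical illustration (the driver-side names
rfn and lock are placeholders, not from the patch); it shows that only
the 0 -> 1 depth transition pays the drain and synchronize_rcu() cost:

```c
struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
/* allocated in bypass mode: q->bypass_depth == 1, QUEUE_FLAG_BYPASS set */

blk_init_allocated_queue(q, rfn, lock);
/* init complete: ends the initial bypass, depth 1 -> 0, flag cleared */

blk_queue_bypass_start(q);	/* depth 0 -> 1: drains queue, synchronize_rcu() */
blk_queue_bypass_start(q);	/* depth 1 -> 2: already bypassing, no drain */
blk_queue_bypass_end(q);	/* depth 2 -> 1: still in bypass */
blk_queue_bypass_end(q);	/* depth 1 -> 0: QUEUE_FLAG_BYPASS cleared */
```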