Commit 26fed4ac authored by Jens Axboe

block: flush plug based on hardware and software queue order

We used to sort the plug list if we had multiple queues before dispatching
requests to the IO scheduler. This usually isn't needed, but for certain
workloads that interleave requests to disks, it's less efficient to
process the plug list one-by-one if everything is interleaved.

Don't sort the list, but skip through it and flush out entries that have
the same target at the same time.

Fixes: df87eb0f ("block: get rid of plug list sorting")
Reported-and-tested-by: Song Liu <song@kernel.org>
Reviewed-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5b205071
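
The batching behaviour of the new blk_mq_dispatch_plug_list() can be illustrated outside the kernel. The sketch below is a minimal userspace model, not kernel code: struct req, its integer target field, pop(), push() and dispatch_one_target() are made-up stand-ins for the real request, the (hctx, ctx) pair, the rq_list helpers and the new function. It only demonstrates how an interleaved plug list is flushed one target at a time, without sorting.

/*
 * Standalone sketch (not kernel code): model the plug list as a singly
 * linked list of requests, each tagged with a "target" standing in for
 * the (hctx, ctx) pair. One pass peels off every entry that matches the
 * first entry's target and dispatches that batch; non-matching entries
 * are collected on a requeue list that becomes the plug list for the
 * next pass. No sorting is involved.
 */
#include <stdio.h>
#include <stdlib.h>

struct req {
	int target;		/* stands in for the (hctx, ctx) pair */
	int id;
	struct req *next;
};

/* pop the head of a singly linked list */
static struct req *pop(struct req **list)
{
	struct req *r = *list;

	if (r)
		*list = r->next;
	return r;
}

/* push onto the head of a singly linked list */
static void push(struct req **list, struct req *r)
{
	r->next = *list;
	*list = r;
}

/* one pass: dispatch everything that shares the first entry's target */
static void dispatch_one_target(struct req **plug)
{
	struct req *requeue = NULL;
	int target = -1, depth = 0;

	do {
		struct req *r = pop(plug);

		if (target < 0) {
			target = r->target;	/* first entry picks the batch */
		} else if (r->target != target) {
			push(&requeue, r);	/* different target: defer */
			continue;
		}
		printf("dispatch req %d to target %d\n", r->id, r->target);
		free(r);
		depth++;
	} while (*plug);

	printf("flushed %d request(s) for target %d\n", depth, target);
	*plug = requeue;	/* leftovers become the next plug list */
}

int main(void)
{
	int targets[] = { 0, 1, 0, 1, 0, 1 };	/* interleaved, as in the workload described */
	struct req *plug = NULL;
	int i;

	/* build an interleaved plug list */
	for (i = 5; i >= 0; i--) {
		struct req *r = malloc(sizeof(*r));

		r->target = targets[i];
		r->id = i;
		push(&plug, r);
	}

	/* outer loop mirrors the do/while in blk_mq_flush_plug_list() */
	do {
		dispatch_one_target(&plug);
	} while (plug);

	return 0;
}

With the interleaved list built in main(), the first pass dispatches the three requests aimed at target 0 as a single batch and defers the rest, and the second pass flushes the three for target 1, so each target gets one batched insertion instead of alternating single-request insertions.
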
@@ -2573,13 +2573,36 @@ static void __blk_mq_flush_plug_list(struct request_queue *q,
 	q->mq_ops->queue_rqs(&plug->mq_list);
 }
 
+static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+{
+	struct blk_mq_hw_ctx *this_hctx = NULL;
+	struct blk_mq_ctx *this_ctx = NULL;
+	struct request *requeue_list = NULL;
+	unsigned int depth = 0;
+	LIST_HEAD(list);
+
+	do {
+		struct request *rq = rq_list_pop(&plug->mq_list);
+
+		if (!this_hctx) {
+			this_hctx = rq->mq_hctx;
+			this_ctx = rq->mq_ctx;
+		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+			rq_list_add(&requeue_list, rq);
+			continue;
+		}
+		list_add_tail(&rq->queuelist, &list);
+		depth++;
+	} while (!rq_list_empty(plug->mq_list));
+
+	plug->mq_list = requeue_list;
+	trace_block_unplug(this_hctx->queue, depth, !from_sched);
+	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+}
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-	struct blk_mq_hw_ctx *this_hctx;
-	struct blk_mq_ctx *this_ctx;
 	struct request *rq;
-	unsigned int depth;
-	LIST_HEAD(list);
 
 	if (rq_list_empty(plug->mq_list))
 		return;
@@ -2615,35 +2638,9 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			return;
 	}
 
-	this_hctx = NULL;
-	this_ctx = NULL;
-	depth = 0;
 	do {
-		rq = rq_list_pop(&plug->mq_list);
-
-		if (!this_hctx) {
-			this_hctx = rq->mq_hctx;
-			this_ctx = rq->mq_ctx;
-		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-			trace_block_unplug(this_hctx->queue, depth,
-					   !from_schedule);
-			blk_mq_sched_insert_requests(this_hctx, this_ctx,
-						     &list, from_schedule);
-			depth = 0;
-			this_hctx = rq->mq_hctx;
-			this_ctx = rq->mq_ctx;
-		}
-
-		list_add(&rq->queuelist, &list);
-		depth++;
+		blk_mq_dispatch_plug_list(plug, from_schedule);
 	} while (!rq_list_empty(plug->mq_list));
-
-	if (!list_empty(&list)) {
-		trace_block_unplug(this_hctx->queue, depth, !from_schedule);
-		blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
-					     from_schedule);
-	}
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,