Commit 68ca8fc0 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: small optimisation of tctx_task_work

There should be no completions stashed when we first get into
tctx_task_work(), so move the completion flushing check a bit later,
after we have had a chance to execute some task work.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c6765c804f3c438591b9825ab9c43d22039073c4.1647897811.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent af2d861d
fs/io_uring.c
@@ -2475,10 +2475,6 @@ static void tctx_task_work(struct callback_head *cb)
 	while (1) {
 		struct io_wq_work_node *node1, *node2;
 
-		if (!tctx->task_list.first &&
-		    !tctx->prior_task_list.first && uring_locked)
-			io_submit_flush_completions(ctx);
-
 		spin_lock_irq(&tctx->task_lock);
 		node1 = tctx->prior_task_list.first;
 		node2 = tctx->task_list.first;
@@ -2492,10 +2488,13 @@ static void tctx_task_work(struct callback_head *cb)
 
 		if (node1)
 			handle_prev_tw_list(node1, &ctx, &uring_locked);
-
 		if (node2)
 			handle_tw_list(node2, &ctx, &uring_locked);
 		cond_resched();
+
+		if (!tctx->task_list.first &&
+		    !tctx->prior_task_list.first && uring_locked)
+			io_submit_flush_completions(ctx);
 	}
 
 	ctx_flush_and_put(ctx, &uring_locked);
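
To make the ordering argument concrete, here is a small stand-alone C sketch of the pattern. It is not the kernel code: the locking, the io_uring context and the real list primitives are stripped out, and every name in it (work_node, run_work, flush_completions, task_work_loop) is invented for illustration. It only models the point of the commit message: completions get stashed by work executed inside the loop, so a flush check placed before any work has run is a guaranteed no-op on the first pass, while the same check after the work (and cond_resched()) can actually flush something.

/*
 * Hypothetical user-space model of the reordering above. Assumption:
 * executing a work item queues one "completion" that is later flushed
 * in a batch, loosely mirroring io_submit_flush_completions().
 */
#include <stdio.h>

struct work_node {
	struct work_node *next;
	void (*fn)(struct work_node *);
};

struct work_list {
	struct work_node *first;
};

static int pending_completions;

/* Run one work item; it may stash a completion for a later batched flush. */
static void run_work(struct work_node *node)
{
	node->fn(node);
	pending_completions++;
}

static void flush_completions(void)
{
	printf("flushing %d completions\n", pending_completions);
	pending_completions = 0;
}

/* Detach the whole list, standing in for splicing task_list under the lock. */
static struct work_node *pop_all(struct work_list *list)
{
	struct work_node *node = list->first;

	list->first = NULL;
	return node;
}

static void task_work_loop(struct work_list *prior, struct work_list *normal)
{
	while (1) {
		struct work_node *node1 = pop_all(prior);
		struct work_node *node2 = pop_all(normal);

		if (!node1 && !node2)
			break;

		for (; node1; node1 = node1->next)
			run_work(node1);
		for (; node2; node2 = node2->next)
			run_work(node2);

		/*
		 * Flush only after running work: on entry to the loop
		 * nothing has been stashed yet, so the old placement of
		 * this check (before the work) could only be a no-op.
		 */
		if (!prior->first && !normal->first && pending_completions)
			flush_completions();
	}
}

static void hello(struct work_node *node)
{
	(void)node;
	puts("work item ran");
}

int main(void)
{
	struct work_node a = { .next = NULL, .fn = hello };
	struct work_node b = { .next = NULL, .fn = hello };
	struct work_list prior = { .first = &a };
	struct work_list normal = { .first = &b };

	task_work_loop(&prior, &normal);
	return 0;
}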