Commit d9dee430 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: remove ->flush_cqes optimisation

It's not clear how widely IOSQE_CQE_SKIP_SUCCESS is used, or how often the
->flush_cqes flag actually prevents a completion flush. Sometimes a high
level of concurrency sets it for at least one CQE in the batch, and sometimes
it doesn't save much because nobody is waiting on the CQ.

Remove the ->flush_cqes flag and the optimisation; this should benefit the
normal use case. Note that this does not reintroduce the spurious eventfd
problem, as the checks for spuriousness were incorporated into
io_eventfd_signal().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/692e81eeddccc096f449a7960365fa7b4a18f8e6.1655637157.git.asml.silence@gmail.com
[axboe: remove now dead state->flush_cqes variable]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a830ffd2
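
For context, here is a minimal userspace sketch (not part of this commit, and assuming liburing is available) of how a submitter opts a request into IOSQE_CQE_SKIP_SUCCESS, the flag whose prevalence the message questions. The nop opcode and the lack of error handling are purely illustrative:

/* Illustrative only: submit a no-op request whose successful completion
 * will not post a CQE, because IOSQE_CQE_SKIP_SUCCESS is set. */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	/* Ask the kernel to skip the CQE if this request succeeds. */
	io_uring_sqe_set_flags(sqe, IOSQE_CQE_SKIP_SUCCESS);
	io_uring_submit(&ring);

	/* Only a failure would produce a CQE, so peek rather than wait. */
	if (io_uring_peek_cqe(&ring, &cqe) == 0) {
		fprintf(stderr, "unexpected CQE, res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}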
@@ -148,7 +148,6 @@ struct io_submit_state {
 	bool			plug_started;
 	bool			need_plug;
-	bool			flush_cqes;
 	unsigned short		submit_nr;
 	struct blk_plug		plug;
 };
...
@@ -1250,22 +1250,19 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	struct io_wq_work_node *node, *prev;
 	struct io_submit_state *state = &ctx->submit_state;
 
-	if (state->flush_cqes) {
-		spin_lock(&ctx->completion_lock);
-		wq_list_for_each(node, prev, &state->compl_reqs) {
-			struct io_kiocb *req = container_of(node, struct io_kiocb,
-							    comp_list);
-
-			if (!(req->flags & REQ_F_CQE_SKIP))
-				__io_fill_cqe_req(ctx, req);
-		}
-
-		io_commit_cqring(ctx);
-		spin_unlock(&ctx->completion_lock);
-		io_cqring_ev_posted(ctx);
-		state->flush_cqes = false;
+	spin_lock(&ctx->completion_lock);
+	wq_list_for_each(node, prev, &state->compl_reqs) {
+		struct io_kiocb *req = container_of(node, struct io_kiocb,
+						    comp_list);
+
+		if (!(req->flags & REQ_F_CQE_SKIP))
+			__io_fill_cqe_req(ctx, req);
 	}
 
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
+	io_cqring_ev_posted(ctx);
 	io_free_batch_list(ctx, state->compl_reqs.first);
 	INIT_WQ_LIST(&state->compl_reqs);
 }
...
@@ -219,8 +219,6 @@ static inline void io_req_add_compl_list(struct io_kiocb *req)
 {
 	struct io_submit_state *state = &req->ctx->submit_state;
 
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		state->flush_cqes = true;
 	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
 }
...
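
The hunks above are easier to follow as a standalone pattern. The following sketch uses hypothetical names and is not kernel code; it shows the shape of the optimisation being removed: a per-batch flag set at enqueue time so the flush path can skip the locked section when no queued request needs a completion posted. The commit drops that flag and takes the lock unconditionally, which is cheaper for the common case where CQEs are not skipped.

#include <pthread.h>
#include <stdbool.h>

struct batch {
	pthread_mutex_t lock;
	bool need_flush;	/* analogue of the removed ->flush_cqes */
	int pending;		/* stand-in for the compl_reqs list */
};

/* Analogue of io_req_add_compl_list(): mark the batch dirty only when the
 * request will actually post a completion. */
static void batch_add(struct batch *b, bool needs_cqe)
{
	if (needs_cqe)
		b->need_flush = true;
	b->pending++;
}

/* Analogue of __io_submit_flush_completions() before this commit: the whole
 * locked flush is skipped when nothing in the batch was marked. */
static void batch_flush(struct batch *b)
{
	if (b->need_flush) {
		pthread_mutex_lock(&b->lock);
		/* ... post completions and signal waiters here ... */
		b->need_flush = false;
		pthread_mutex_unlock(&b->lock);
	}
	b->pending = 0;
}

int main(void)
{
	struct batch b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	batch_add(&b, false);	/* CQE-skipping request: flag stays clear */
	batch_add(&b, true);	/* normal request: flag forces a flush */
	batch_flush(&b);
	return 0;
}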