Commit 3d4aeb9f authored by Pavel Begunkov, committed by Jens Axboe

io_uring: don't spinlock when not posting CQEs

When none of the requests queued for batch completion need to post a CQE,
see IOSQE_CQE_SKIP_SUCCESS, avoid grabbing ->completion_lock and the rest
of the commit/post work.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8d4b4a08bca022cbe19af00266407116775b3e4d.1636559119.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 04c76b41
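The pattern here generalizes to any batched-completion design: record at queue
time whether the batch will produce anything observable, and skip the lock and
the post work entirely at flush time when it will not. Below is a minimal,
self-contained sketch of that pattern in plain C; the names (struct batch,
batch_add, batch_flush) are hypothetical and a pthread mutex stands in for
->completion_lock. It is not the io_uring implementation, just the shape of
the optimization.

#include <stdbool.h>
#include <pthread.h>

/* Hypothetical batch of queued completions. */
struct batch {
	pthread_mutex_t	lock;		/* stands in for ->completion_lock */
	bool		flush_cqes;	/* does any queued entry need a CQE? */
};

/* Queue time: remember whether flushing will have to post anything. */
static void batch_add(struct batch *b, bool cqe_skip)
{
	if (!cqe_skip)
		b->flush_cqes = true;
	/* ...link the request into the batch list (lockless, single submitter)... */
}

/* Flush time: only pay for the lock and commit/post when needed. */
static void batch_flush(struct batch *b)
{
	if (b->flush_cqes) {
		pthread_mutex_lock(&b->lock);
		/* ...fill a CQE for each entry without the skip flag... */
		pthread_mutex_unlock(&b->lock);
		b->flush_cqes = false;
	}
	/* ...free or recycle the batched requests either way... */
}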
@@ -321,6 +321,7 @@ struct io_submit_state {
 
 	bool			plug_started;
 	bool			need_plug;
+	bool			flush_cqes;
 	unsigned short		submit_nr;
 	struct blk_plug		plug;
 };
@@ -1525,8 +1526,11 @@ static void io_prep_async_link(struct io_kiocb *req)
 
 static inline void io_req_add_compl_list(struct io_kiocb *req)
 {
+	struct io_ring_ctx *ctx = req->ctx;
 	struct io_submit_state *state = &req->ctx->submit_state;
 
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		ctx->submit_state.flush_cqes = true;
 	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
 }
 
@@ -2386,18 +2390,22 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	struct io_wq_work_node *node, *prev;
 	struct io_submit_state *state = &ctx->submit_state;
 
-	spin_lock(&ctx->completion_lock);
-	wq_list_for_each(node, prev, &state->compl_reqs) {
-		struct io_kiocb *req = container_of(node, struct io_kiocb,
-						    comp_list);
+	if (state->flush_cqes) {
+		spin_lock(&ctx->completion_lock);
+		wq_list_for_each(node, prev, &state->compl_reqs) {
+			struct io_kiocb *req = container_of(node, struct io_kiocb,
+							    comp_list);
+
+			if (!(req->flags & REQ_F_CQE_SKIP))
+				__io_fill_cqe(ctx, req->user_data, req->result,
+					      req->cflags);
+		}
 
-		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result,
-				      req->cflags);
-	}
-
-	io_commit_cqring(ctx);
-	spin_unlock(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
+		io_commit_cqring(ctx);
+		spin_unlock(&ctx->completion_lock);
+		io_cqring_ev_posted(ctx);
+		state->flush_cqes = false;
+	}
+
 	io_free_batch_list(ctx, state->compl_reqs.first);
 	INIT_WQ_LIST(&state->compl_reqs);
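The lockless flush path only triggers when every request in the batch carries
REQ_F_CQE_SKIP, which userspace requests per-SQE with the IOSQE_CQE_SKIP_SUCCESS
flag. As a usage sketch, here is how a liburing submission would opt in; it
assumes a kernel and liburing recent enough to expose the flag, and the helper
name queue_silent_write is made up for illustration.

#include <liburing.h>

/* Queue a write whose successful completion posts no CQE; a CQE is
 * still posted if the request fails. */
static void queue_silent_write(struct io_uring *ring, int fd,
			       const void *buf, unsigned nbytes)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return;	/* SQ ring full; a real caller would submit and retry */
	io_uring_prep_write(sqe, fd, buf, nbytes, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_CQE_SKIP_SUCCESS);
}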