Commit a38d68db authored by Pavel Begunkov, committed by Jens Axboe

io_uring: help inlining of io_req_complete()

__io_req_complete() inlining is a bit weird: some compilers don't
optimise out the non-NULL branch even when it is called as
io_req_complete(). Help them a bit by extracting the state and
stateless helpers out of __io_req_complete().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8662daec
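
For readers outside the kernel tree, here is a minimal standalone C sketch of the pattern the patch applies (all names below are illustrative, not io_uring's, and the double-underscore prefix merely mirrors kernel naming style): keep the dispatcher a tiny inline branch over the state pointer, so a call site that passes a constant NULL can constant-fold down to a direct call of the stateless helper.

#include <stddef.h>

struct comp_state { int nr; };

/* heavyweight path: would take a lock, post an event, drop a ref */
static void complete_nostate(int res)
{
	(void)res;
}

/* batched path: queue the completion, flush once enough pile up */
static void complete_state(int res, struct comp_state *cs)
{
	(void)res;
	cs->nr++;
}

/* tiny inline dispatcher: the only work left is the branch on 'cs' */
static inline void __complete(int res, struct comp_state *cs)
{
	if (!cs)
		complete_nostate(res);
	else
		complete_state(res, cs);
}

/*
 * With __complete() this small, the compiler inlines it here and
 * constant-folds !NULL, leaving a direct call to complete_nostate()
 * with no runtime branch at this call site.
 */
static inline void complete(int res)
{
	__complete(res, NULL);
}

int main(void)
{
	struct comp_state cs = { 0 };

	complete(0);		/* folds to complete_nostate(0) */
	__complete(0, &cs);	/* takes the batched path */
	return 0;
}

Before the patch, both paths lived in one function body, and some compilers kept the runtime NULL check even at io_req_complete() call sites; moving each path into its own out-of-line helper leaves the wrapper cheap enough to always inline and fold.
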
@@ -1886,7 +1886,8 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
 	__io_cqring_fill_event(req, res, 0);
 }
 
-static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
+static void io_req_complete_nostate(struct io_kiocb *req, long res,
+				    unsigned int cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
@@ -1897,6 +1898,7 @@ static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	io_cqring_ev_posted(ctx);
+	io_put_req(req);
 }
 
 static void io_submit_flush_completions(struct io_comp_state *cs)
@@ -1932,23 +1934,27 @@ static void io_submit_flush_completions(struct io_comp_state *cs)
 	cs->nr = 0;
 }
 
-static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
-			      struct io_comp_state *cs)
+static void io_req_complete_state(struct io_kiocb *req, long res,
+				  unsigned int cflags, struct io_comp_state *cs)
 {
-	if (!cs) {
-		io_cqring_add_event(req, res, cflags);
-		io_put_req(req);
-	} else {
-		io_clean_op(req);
-		req->result = res;
-		req->compl.cflags = cflags;
-		list_add_tail(&req->compl.list, &cs->list);
-		if (++cs->nr >= 32)
-			io_submit_flush_completions(cs);
-	}
+	io_clean_op(req);
+	req->result = res;
+	req->compl.cflags = cflags;
+	list_add_tail(&req->compl.list, &cs->list);
+	if (++cs->nr >= 32)
+		io_submit_flush_completions(cs);
+}
+
+static inline void __io_req_complete(struct io_kiocb *req, long res,
+				     unsigned cflags, struct io_comp_state *cs)
+{
+	if (!cs)
+		io_req_complete_nostate(req, res, cflags);
+	else
+		io_req_complete_state(req, res, cflags, cs);
 }
 
-static void io_req_complete(struct io_kiocb *req, long res)
+static inline void io_req_complete(struct io_kiocb *req, long res)
 {
 	__io_req_complete(req, res, 0, NULL);
 }