Commit 906c6caa authored by Pavel Begunkov, committed by Jens Axboe

io_uring: optimise io_prep_linked_timeout()

Linked timeout handling during issuing is heavy: it adds extra
instructions and forces us to save the next linked timeout before
io_issue_sqe().

Following the same reasoning as in the refcounting patches, a request
can't be freed by the time it returns from io_issue_sqe(), so we don't
need to do io_prep_linked_timeout() in advance; it can be delayed to
colder paths, optimising the generic path.

It should also save quite a lot for requests with linked timeouts that
complete inline, sparing the timeout spinlocking, hrtimer_start(),
hrtimer_try_to_cancel() and so on.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/19bfc9a0d26c5c5f1e359f7650afe807ca8ef879.1628981736.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0756a869
fs/io_uring.c
@@ -1306,8 +1306,16 @@ static void io_req_track_inflight(struct io_kiocb *req)
 	}
 }
 
+static inline void io_unprep_linked_timeout(struct io_kiocb *req)
+{
+	req->flags &= ~REQ_F_LINK_TIMEOUT;
+}
+
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
+	if (WARN_ON_ONCE(!req->link))
+		return NULL;
+
 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
 	req->flags |= REQ_F_LINK_TIMEOUT;
 
@@ -1932,6 +1940,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 	if (req->flags & REQ_F_ARM_LTIMEOUT) {
 		struct io_kiocb *link = req->link;
 
+		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
 			io_cqring_fill_event(link->ctx, link->user_data,
@@ -6485,7 +6494,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 static void __io_queue_sqe(struct io_kiocb *req)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+	struct io_kiocb *linked_timeout;
 	int ret;
 
 issue_sqe:
@@ -6503,10 +6512,19 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			state->compl_reqs[state->compl_nr++] = req;
 			if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
 				io_submit_flush_completions(ctx);
+			return;
 		}
+
+		linked_timeout = io_prep_linked_timeout(req);
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
 	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+		linked_timeout = io_prep_linked_timeout(req);
+
		switch (io_arm_poll_handler(req)) {
 		case IO_APOLL_READY:
+			if (linked_timeout)
+				io_unprep_linked_timeout(req);
 			goto issue_sqe;
 		case IO_APOLL_ABORTED:
 			/*
@@ -6516,11 +6534,12 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			io_queue_async_work(req);
 			break;
 		}
+
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
 	} else {
 		io_req_complete_failed(req, ret);
 	}
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)
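For context, the fast path this patch optimises serves the userspace pattern below. This is a minimal illustrative sketch using liburing, not part of the commit; the file path, buffer size and one-second timeout are arbitrary choices for the example. A read SQE is flagged with IOSQE_IO_LINK and followed by an IORING_OP_LINK_TIMEOUT SQE; arming that timeout kernel-side (io_queue_linked_timeout()) is exactly what the patch moves off the inline-completion path.

/*
 * Illustrative userspace sketch (not part of this commit): a read with
 * a linked timeout. If the read completes inline, the patched kernel
 * never arms the hrtimer for the linked timeout at all.
 */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	char buf[4096];
	int fd, i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	fd = open("/etc/hostname", O_RDONLY);	/* arbitrary example file */
	if (fd < 0)
		return 1;

	/* Read SQE, flagged so the following SQE is linked to it. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* IORING_OP_LINK_TIMEOUT: cancels the read if it takes over 1s. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);
	for (i = 0; i < 2; i++) {	/* one CQE each for read and timeout */
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("cqe res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

When the read completes inline, its CQE carries the byte count and the linked timeout's CQE completes with -ECANCELED; with this patch, that common case skips the timeout spinlocking and hrtimer_start()/hrtimer_try_to_cancel() entirely.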