Commit 57859f4d authored by Pavel Begunkov, committed by Jens Axboe

io_uring: clean up io_queue_next()

Move the fast check out of io_queue_next(): it makes the req->flags checks
in __io_submit_flush_completions() a bit clearer and gives us better
control, e.g. the now unjustified unlikely() in
__io_submit_flush_completions() can be removed. We also don't need this
check in io_free_req(), as that function is a slow path and
io_req_find_next() handles the non-linked case correctly.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1f9e1cc80adbb11b37017d511df4a2c6141a3f08.1647897811.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b605a7fa
@@ -2597,14 +2597,12 @@ static void io_req_task_queue_reissue(struct io_kiocb *req)
 	io_req_task_work_add(req, false);
 }
 
-static inline void io_queue_next(struct io_kiocb *req)
+static void io_queue_next(struct io_kiocb *req)
 {
-	if (unlikely(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))) {
-		struct io_kiocb *nxt = io_req_find_next(req);
+	struct io_kiocb *nxt = io_req_find_next(req);
 
-		if (nxt)
-			io_req_task_queue(nxt);
-	}
+	if (nxt)
+		io_req_task_queue(nxt);
 }
 
 static void io_free_req(struct io_kiocb *req)
@@ -2644,7 +2642,8 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 						&ctx->apoll_cache);
 				req->flags &= ~REQ_F_POLLED;
 			}
-			io_queue_next(req);
+			if (req->flags & (REQ_F_LINK|REQ_F_HARDLINK))
+				io_queue_next(req);
 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
 				io_clean_op(req);
 		}
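
The second hunk relies on the commit message's point that io_req_find_next()
already bails out for non-linked requests, so only the hot path in
io_free_batch_list() needs the REQ_F_LINK|REQ_F_HARDLINK pre-check, while the
slow io_free_req() path can call io_queue_next() unconditionally. Below is a
minimal, standalone sketch of that call pattern; the flag values, the reduced
struct io_kiocb, and the printf-based io_req_task_queue() stub are simplified
assumptions for illustration, not the real fs/io_uring.c definitions.

#include <stdio.h>

/* Simplified stand-ins for the real kernel definitions (assumptions). */
#define REQ_F_LINK	(1U << 0)
#define REQ_F_HARDLINK	(1U << 1)

struct io_kiocb {
	unsigned int flags;
	struct io_kiocb *link;	/* next request in the link chain, if any */
};

/* Sketch of io_req_find_next(): return NULL early when the request is not
 * part of a link, otherwise detach and return the next request. */
static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK)))
		return NULL;
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

/* Stand-in for io_req_task_queue(): just report what would be queued. */
static void io_req_task_queue(struct io_kiocb *nxt)
{
	printf("queueing linked request %p\n", (void *)nxt);
}

/* Post-patch shape: io_queue_next() always asks for the next request;
 * the fast flags test lives in the hot-path caller instead. */
static void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

int main(void)
{
	struct io_kiocb next = { .flags = 0, .link = NULL };
	struct io_kiocb linked = { .flags = REQ_F_LINK, .link = &next };
	struct io_kiocb plain = { .flags = 0, .link = NULL };

	/* Hot path (as in io_free_batch_list()): pre-check the flags. */
	if (linked.flags & (REQ_F_LINK | REQ_F_HARDLINK))
		io_queue_next(&linked);	/* queues 'next' */
	if (plain.flags & (REQ_F_LINK | REQ_F_HARDLINK))
		io_queue_next(&plain);	/* call skipped entirely */

	/* Slow path (as in io_free_req()): no pre-check needed, because
	 * io_req_find_next() returns NULL for non-linked requests. */
	io_queue_next(&plain);
	return 0;
}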