Commit 863e0560 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: track link's head and tail during submit

Explicitly save not only a link's head in io_submit_sqe[s]() but the
tail as well. That's in preparation for keeping linked requests in a
singly linked list.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 018043be
...@@ -6536,8 +6536,13 @@ static inline void io_queue_link_head(struct io_kiocb *req, ...@@ -6536,8 +6536,13 @@ static inline void io_queue_link_head(struct io_kiocb *req,
io_queue_sqe(req, NULL, cs); io_queue_sqe(req, NULL, cs);
} }
struct io_submit_link {
struct io_kiocb *head;
struct io_kiocb *last;
};
static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **link, struct io_comp_state *cs) struct io_submit_link *link, struct io_comp_state *cs)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
int ret; int ret;
...@@ -6549,8 +6554,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -6549,8 +6554,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
* submitted sync once the chain is complete. If none of those * submitted sync once the chain is complete. If none of those
* conditions are true (normal request), then just queue it. * conditions are true (normal request), then just queue it.
*/ */
if (*link) { if (link->head) {
struct io_kiocb *head = *link; struct io_kiocb *head = link->head;
/* /*
* Taking sequential execution of a link, draining both sides * Taking sequential execution of a link, draining both sides
...@@ -6571,11 +6576,12 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -6571,11 +6576,12 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} }
trace_io_uring_link(ctx, req, head); trace_io_uring_link(ctx, req, head);
list_add_tail(&req->link_list, &head->link_list); list_add_tail(&req->link_list, &head->link_list);
link->last = req;
/* last request of a link, enqueue the link */ /* last request of a link, enqueue the link */
if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
io_queue_link_head(head, cs); io_queue_link_head(head, cs);
*link = NULL; link->head = NULL;
} }
} else { } else {
if (unlikely(ctx->drain_next)) { if (unlikely(ctx->drain_next)) {
...@@ -6589,7 +6595,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -6589,7 +6595,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ret = io_req_defer_prep(req, sqe); ret = io_req_defer_prep(req, sqe);
if (unlikely(ret)) if (unlikely(ret))
req->flags |= REQ_F_FAIL_LINK; req->flags |= REQ_F_FAIL_LINK;
*link = req; link->head = req;
link->last = req;
} else { } else {
io_queue_sqe(req, sqe, cs); io_queue_sqe(req, sqe, cs);
} }
...@@ -6769,7 +6776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -6769,7 +6776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
{ {
struct io_submit_state state; struct io_submit_state state;
struct io_kiocb *link = NULL; struct io_submit_link link;
int i, submitted = 0; int i, submitted = 0;
/* if we have a backlog and couldn't flush it all, return BUSY */ /* if we have a backlog and couldn't flush it all, return BUSY */
...@@ -6789,6 +6796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) ...@@ -6789,6 +6796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
refcount_add(nr, &current->usage); refcount_add(nr, &current->usage);
io_submit_state_start(&state, ctx, nr); io_submit_state_start(&state, ctx, nr);
link.head = NULL;
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
const struct io_uring_sqe *sqe; const struct io_uring_sqe *sqe;
...@@ -6834,8 +6842,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) ...@@ -6834,8 +6842,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
percpu_counter_sub(&tctx->inflight, unused); percpu_counter_sub(&tctx->inflight, unused);
put_task_struct_many(current, unused); put_task_struct_many(current, unused);
} }
if (link) if (link.head)
io_queue_link_head(link, &state.comp); io_queue_link_head(link.head, &state.comp);
io_submit_state_end(&state); io_submit_state_end(&state);
/* Commit SQ ring head once we've consumed and submitted all SQEs */ /* Commit SQ ring head once we've consumed and submitted all SQEs */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment