Commit 22b2ca31 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: extract a helper for drain init

Add a helper io_init_req_drain for initialising requests with
IOSQE_DRAIN set. Also move bits from the preamble of io_drain_req() in
there, because we already modify all the bits needed inside the helper.
Signed-off-by: default avatarPavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dcb412825b35b1cb8891245a387d7d69f8d14cef.1633107393.git.asml.silence@gmail.com
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 5e371265
...@@ -6430,28 +6430,11 @@ static u32 io_get_sequence(struct io_kiocb *req) ...@@ -6430,28 +6430,11 @@ static u32 io_get_sequence(struct io_kiocb *req)
static bool io_drain_req(struct io_kiocb *req) static bool io_drain_req(struct io_kiocb *req)
{ {
struct io_kiocb *pos;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_defer_entry *de; struct io_defer_entry *de;
int ret; int ret;
u32 seq; u32 seq;
/* not interested in head, start from the first linked */
io_for_each_link(pos, req->link) {
/*
* If we need to drain a request in the middle of a link, drain
* the head request and the next request/link after the current
* link. Considering sequential execution of links,
* IOSQE_IO_DRAIN will be maintained for every request of our
* link.
*/
if (pos->flags & REQ_F_IO_DRAIN) {
ctx->drain_next = true;
req->flags |= REQ_F_IO_DRAIN;
break;
}
}
/* Still need defer if there is pending req in defer list. */ /* Still need defer if there is pending req in defer list. */
if (likely(list_empty_careful(&ctx->defer_list) && if (likely(list_empty_careful(&ctx->defer_list) &&
!(req->flags & REQ_F_IO_DRAIN))) { !(req->flags & REQ_F_IO_DRAIN))) {
...@@ -6992,6 +6975,25 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, ...@@ -6992,6 +6975,25 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
return true; return true;
} }
/*
 * Prepare drain state for a request submitted with IOSQE_IO_DRAIN:
 * flag the ring as having an active drain and, when the request sits
 * inside a link, propagate the drain to the link head.
 */
static void io_init_req_drain(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *head = ctx->submit_state.link.head;

	ctx->drain_active = true;
	if (!head)
		return;
	/*
	 * Draining in the middle of a link: mark the head (and via
	 * drain_next, the request/link that follows) so that, given
	 * sequential execution of links, IOSQE_IO_DRAIN is maintained
	 * for every request of our link.
	 */
	head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
	ctx->drain_next = true;
}
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe) const struct io_uring_sqe *sqe)
__must_hold(&ctx->uring_lock) __must_hold(&ctx->uring_lock)
...@@ -7018,14 +7020,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -7018,14 +7020,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if ((sqe_flags & IOSQE_BUFFER_SELECT) && if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
!io_op_defs[req->opcode].buffer_select) !io_op_defs[req->opcode].buffer_select)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (sqe_flags & IOSQE_IO_DRAIN) { if (sqe_flags & IOSQE_IO_DRAIN)
struct io_submit_link *link = &ctx->submit_state.link; io_init_req_drain(req);
ctx->drain_active = true;
req->flags |= REQ_F_FORCE_ASYNC;
if (link->head)
link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
}
} }
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
...@@ -7037,7 +7033,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -7037,7 +7033,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
ctx->drain_next = false; ctx->drain_next = false;
ctx->drain_active = true; ctx->drain_active = true;
req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN; req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment