Commit b7e298d2 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: merge defer_prep() and prep_async()

Merge the two functions and rename in favour of the second one, as it
conveys the meaning better.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 26f0505a
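For reference, the merged helper after this patch reads roughly as follows (a condensed sketch assembled from the hunks below; the opcode cases between READV and CONNECT are elided, and the comments are added here, not part of the patch):

static int io_req_prep_async(struct io_kiocb *req)
{
	/* opcodes that don't need async setup are a no-op here */
	if (!io_op_defs[req->opcode].needs_async_setup)
		return 0;
	if (WARN_ON_ONCE(req->async_data))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -EAGAIN;

	switch (req->opcode) {
	case IORING_OP_READV:
		return io_rw_prep_async(req, READ);
	/* ... remaining needs_async_setup opcodes ... */
	case IORING_OP_CONNECT:
		return io_connect_prep_async(req);
	}
	/* every needs_async_setup opcode must be handled above */
	printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
		    req->opcode);
	return -EFAULT;
}

Note that falling through the switch now returns -EFAULT with a one-time warning rather than a silent 0, so an opcode marked needs_async_setup without a matching case fails loudly.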
@@ -5868,6 +5868,13 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 static int io_req_prep_async(struct io_kiocb *req)
 {
+	if (!io_op_defs[req->opcode].needs_async_setup)
+		return 0;
+	if (WARN_ON_ONCE(req->async_data))
+		return -EFAULT;
+	if (io_alloc_async_data(req))
+		return -EAGAIN;
+
 	switch (req->opcode) {
 	case IORING_OP_READV:
 		return io_rw_prep_async(req, READ);
@@ -5880,18 +5887,9 @@ static int io_req_prep_async(struct io_kiocb *req)
 	case IORING_OP_CONNECT:
 		return io_connect_prep_async(req);
 	}
-	return 0;
-}
-
-static int io_req_defer_prep(struct io_kiocb *req)
-{
-	if (!io_op_defs[req->opcode].needs_async_setup)
-		return 0;
-	if (WARN_ON_ONCE(req->async_data))
-		return -EFAULT;
-	if (io_alloc_async_data(req))
-		return -EAGAIN;
-	return io_req_prep_async(req);
+	printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
+		    req->opcode);
+	return -EFAULT;
 }
 
 static u32 io_get_sequence(struct io_kiocb *req)
@@ -5924,7 +5922,7 @@ static int io_req_defer(struct io_kiocb *req)
 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
 		return 0;
 
-	ret = io_req_defer_prep(req);
+	ret = io_req_prep_async(req);
 	if (ret)
 		return ret;
 	io_prep_async_link(req);
@@ -6339,7 +6337,7 @@ static void io_queue_sqe(struct io_kiocb *req)
 			io_req_complete_failed(req, ret);
 		}
 	} else if (req->flags & REQ_F_FORCE_ASYNC) {
-		ret = io_req_defer_prep(req);
+		ret = io_req_prep_async(req);
 		if (unlikely(ret))
 			goto fail_req;
 		io_queue_async_work(req);
@@ -6492,7 +6490,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		head->flags |= REQ_F_IO_DRAIN;
 		ctx->drain_next = 1;
 	}
-	ret = io_req_defer_prep(req);
+	ret = io_req_prep_async(req);
 	if (unlikely(ret))
 		goto fail_req;
 	trace_io_uring_link(ctx, req, head);