Commit 973fc83f authored by Dylan Yudaken, committed by Jens Axboe

io_uring: defer all io_req_complete_failed

All failures now happen under the lock and can therefore be deferred. To stay
consistent when a failure occurs after some multishot CQEs have already been
deferred (and to keep CQE ordering), always defer failures.

To make this obvious at the caller (and to help prevent a future bug),
rename io_req_complete_failed to io_req_defer_failed.
Signed-off-by: Dylan Yudaken <dylany@meta.com>
Link: https://lore.kernel.org/r/20221124093559.3780686-4-dylany@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c06c6c5d
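For intuition, here is a minimal user-space sketch (not kernel code; the list layout and helper names are illustrative assumptions modeled loosely on io_req_complete_post()/io_req_complete_defer()) of why deferring the failure CQE keeps it ordered after multishot CQEs that were already deferred on the same context:

/* Illustrative sketch only -- models deferred vs. immediate completions. */
#include <errno.h>
#include <stdio.h>

struct cqe { int user_data; int res; };

#define MAX_DEFERRED 16

struct ctx {
	struct cqe deferred[MAX_DEFERRED];	/* stand-in for the per-ctx deferred list */
	int nr_deferred;
};

/* Post a CQE immediately (models an immediate completion). */
static void complete_post(int user_data, int res)
{
	printf("CQE: user_data=%d res=%d\n", user_data, res);
}

/* Queue a CQE on the context instead (models a deferred completion). */
static void complete_defer(struct ctx *ctx, int user_data, int res)
{
	if (ctx->nr_deferred < MAX_DEFERRED)
		ctx->deferred[ctx->nr_deferred++] = (struct cqe){ user_data, res };
}

/* Flush deferred CQEs in FIFO order, as a later flush under the lock would. */
static void flush_completions(struct ctx *ctx)
{
	for (int i = 0; i < ctx->nr_deferred; i++)
		complete_post(ctx->deferred[i].user_data, ctx->deferred[i].res);
	ctx->nr_deferred = 0;
}

int main(void)
{
	struct ctx ctx = { .nr_deferred = 0 };

	/* A multishot CQE for request 1 is already sitting on the deferred list. */
	complete_defer(&ctx, 1, 4096);

	/*
	 * If the subsequent failure were posted immediately, user space would
	 * see it before the already-deferred multishot CQE. Deferring the
	 * failure as well preserves the order.
	 */
	complete_defer(&ctx, 1, -ECANCELED);

	flush_completions(&ctx);
	return 0;
}

Expected output is the res=4096 CQE followed by the res=-ECANCELED CQE, matching the order in which the completions were queued.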
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -864,7 +864,7 @@ void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 	}
 }
 
-void io_req_complete_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	__must_hold(&ctx->uring_lock)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -875,7 +875,7 @@ void io_req_complete_failed(struct io_kiocb *req, s32 res)
 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
 	if (def->fail)
 		def->fail(req);
-	io_req_complete_post(req, 0);
+	io_req_complete_defer(req);
 }
 
 /*
@@ -1231,9 +1231,8 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
-	/* not needed for normal modes, but SQPOLL depends on it */
 	io_tw_lock(req->ctx, locked);
-	io_req_complete_failed(req, req->cqe.res);
+	io_req_defer_failed(req, req->cqe.res);
 }
 
 void io_req_task_submit(struct io_kiocb *req, bool *locked)
@@ -1243,7 +1242,7 @@ void io_req_task_submit(struct io_kiocb *req, bool *locked)
 	if (likely(!(req->task->flags & PF_EXITING)))
 		io_queue_sqe(req);
 	else
-		io_req_complete_failed(req, -EFAULT);
+		io_req_defer_failed(req, -EFAULT);
 }
 
 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
@@ -1630,7 +1629,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
 	ret = io_req_prep_async(req);
 	if (ret) {
 fail:
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 	io_prep_async_link(req);
@@ -1860,7 +1859,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	struct io_kiocb *linked_timeout;
 
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 		return;
 	}
 
@@ -1910,14 +1909,14 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		 */
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
-		io_req_complete_failed(req, req->cqe.res);
+		io_req_defer_failed(req, req->cqe.res);
 	} else if (unlikely(req->ctx->drain_active)) {
 		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
 		if (unlikely(ret))
-			io_req_complete_failed(req, ret);
+			io_req_defer_failed(req, ret);
 		else
 			io_queue_iowq(req, NULL);
 	}
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -30,7 +30,7 @@ bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_complete_failed(struct io_kiocb *req, s32 res);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
 		     bool allow_overflow);
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -317,7 +317,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_complete_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)