Commit 47b4c686 authored by Pavel Begunkov, committed by Jens Axboe

io_uring/rw: don't lose partial IO result on fail

A partially done read/write may end up in io_req_complete_failed() and
lose the result; make sure we return the number of bytes processed.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/05e0879c226bcd53b441bf92868eadd4bf04e2fc.1663668091.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a47b255e
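
Before the diff, a small user-space model (not kernel code) of the mechanism the patch relies on: on a failed completion, a per-opcode .fail hook gets a chance to rewrite the result so that bytes already transferred are reported instead of being dropped. The names below (pseudo_req, rw_fail_hook, complete_failed) are invented for illustration and only approximate the real io_uring internals.

/*
 * User-space model only -- not kernel code.  It mimics the idea of the
 * patch: a request that fails after a partial transfer should complete
 * with the number of bytes already processed, not just the error code.
 */
#include <stdio.h>

struct pseudo_req {
	long res;		/* completion result as it currently stands */
	long bytes_done;	/* bytes transferred before the failure */
};

typedef void (*fail_hook_t)(struct pseudo_req *req);

/* Analogue of io_rw_fail(): fold previously done IO into the result. */
static void rw_fail_hook(struct pseudo_req *req)
{
	if (req->bytes_done > 0)
		req->res = req->res < 0 ? req->bytes_done
					: req->res + req->bytes_done;
}

/* Analogue of the failed-completion path consulting the opcode table. */
static void complete_failed(struct pseudo_req *req, fail_hook_t fail)
{
	if (fail)
		fail(req);
	printf("posted result: %ld\n", req->res);
}

int main(void)
{
	struct pseudo_req req = { .res = -5, .bytes_done = 4096 };

	complete_failed(&req, NULL);		/* no hook: -5, partial IO lost */
	req.res = -5;
	complete_failed(&req, rw_fail_hook);	/* with hook: 4096 */
	return 0;
}

The patch below wires exactly this kind of hook (.fail = io_rw_fail) into the read/write entries of the opcode table.
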
@@ -69,6 +69,7 @@ const struct io_op_def io_op_defs[] = {
 		.issue			= io_read,
 		.prep_async		= io_readv_prep_async,
 		.cleanup		= io_readv_writev_cleanup,
+		.fail			= io_rw_fail,
 	},
 	[IORING_OP_WRITEV] = {
 		.needs_file		= 1,
@@ -85,6 +86,7 @@ const struct io_op_def io_op_defs[] = {
 		.issue			= io_write,
 		.prep_async		= io_writev_prep_async,
 		.cleanup		= io_readv_writev_cleanup,
+		.fail			= io_rw_fail,
 	},
 	[IORING_OP_FSYNC] = {
 		.needs_file		= 1,
@@ -105,6 +107,7 @@ const struct io_op_def io_op_defs[] = {
 		.name			= "READ_FIXED",
 		.prep			= io_prep_rw,
 		.issue			= io_read,
+		.fail			= io_rw_fail,
 	},
 	[IORING_OP_WRITE_FIXED] = {
 		.needs_file		= 1,
@@ -119,6 +122,7 @@ const struct io_op_def io_op_defs[] = {
 		.name			= "WRITE_FIXED",
 		.prep			= io_prep_rw,
 		.issue			= io_write,
+		.fail			= io_rw_fail,
 	},
 	[IORING_OP_POLL_ADD] = {
 		.needs_file		= 1,
@@ -275,6 +279,7 @@ const struct io_op_def io_op_defs[] = {
 		.name			= "READ",
 		.prep			= io_prep_rw,
 		.issue			= io_read,
+		.fail			= io_rw_fail,
 	},
 	[IORING_OP_WRITE] = {
 		.needs_file		= 1,
@@ -289,6 +294,7 @@ const struct io_op_def io_op_defs[] = {
 		.name			= "WRITE",
 		.prep			= io_prep_rw,
 		.issue			= io_write,
+		.fail			= io_rw_fail,
 	},
 	[IORING_OP_FADVISE] = {
 		.needs_file		= 1,
......
@@ -954,6 +954,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 	io_cqring_wake(ctx);
 }
 
+void io_rw_fail(struct io_kiocb *req)
+{
+	int res;
+
+	res = io_fixup_rw_res(req, req->cqe.res);
+	io_req_set_res(req, res, req->cqe.flags);
+}
+
 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 {
 	struct io_wq_work_node *pos, *start, *prev;
......
@@ -21,3 +21,4 @@ int io_readv_prep_async(struct io_kiocb *req);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);
 int io_writev_prep_async(struct io_kiocb *req);
 void io_readv_writev_cleanup(struct io_kiocb *req);
+void io_rw_fail(struct io_kiocb *req);