Commit b7bb4f7d authored by Jens Axboe

io_uring: fix pre-prepped issue with force_nonblock == true

Some of these code paths assume that any force_nonblock == true issue
is not prepped, but that's not true if we did prep as part of link setup
earlier. Check if we already have an async context allocated before
setting up a new one.

Clean up the async context setup in general; we have a lot of duplicated
code there.

Fixes: 03b1230c ("io_uring: ensure async punted sendmsg/recvmsg requests copy data")
Fixes: f67676d1 ("io_uring: ensure async punted read/write requests copy iovec")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 525b305d
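
The heart of the fix is a single allocation helper that callers guard with a req->io check, so a request that was already prepped during link setup keeps its existing context instead of getting a fresh one. A minimal userspace C sketch of that pattern follows — stand-in types and simplified fields, not the fs/io_uring.c code verbatim:

/*
 * Minimal sketch of the guarded-allocation pattern this commit
 * introduces. Types and fields are simplified stand-ins.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct io_async_ctx { char sqe[64]; };           /* stand-in async context */
struct io_kiocb { struct io_async_ctx *io; };    /* stand-in request */

/* Single shared allocator: 0 on success, 1 on allocation failure. */
static int io_alloc_async_ctx(struct io_kiocb *req)
{
	req->io = malloc(sizeof(*req->io));
	return req->io == NULL;
}

/*
 * Issue paths guard the allocation: if link setup already attached a
 * context, it is reused instead of being leaked or clobbered.
 */
static int io_setup_async(struct io_kiocb *req)
{
	if (!req->io && io_alloc_async_ctx(req))
		return -ENOMEM;
	/* ... copy iovec/msghdr state into req->io here ... */
	return 0;
}

int main(void)
{
	struct io_kiocb req = { .io = NULL };
	struct io_async_ctx *first;

	if (io_setup_async(&req))
		return 1;
	first = req.io;
	/* Second call sees req.io set and reuses the same context. */
	if (io_setup_async(&req) == 0 && req.io == first)
		printf("existing async context reused\n");
	free(req.io);
	return 0;
}
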
fs/io_uring.c
@@ -1701,7 +1701,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 	return ret;
 }
 
-static void io_req_map_io(struct io_kiocb *req, ssize_t io_size,
+static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 			  struct iovec *iovec, struct iovec *fast_iov,
 			  struct iov_iter *iter)
 {
@@ -1715,19 +1715,39 @@ static void io_req_map_io(struct io_kiocb *req, ssize_t io_size,
 	}
 }
 
-static int io_setup_async_io(struct io_kiocb *req, ssize_t io_size,
-			     struct iovec *iovec, struct iovec *fast_iov,
-			     struct iov_iter *iter)
+static int io_alloc_async_ctx(struct io_kiocb *req)
 {
 	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
 	if (req->io) {
-		io_req_map_io(req, io_size, iovec, fast_iov, iter);
 		memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
 		req->sqe = &req->io->sqe;
 		return 0;
 	}
 
-	return -ENOMEM;
+	return 1;
+}
+
+static void io_rw_async(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+	struct iovec *iov = NULL;
+
+	if (req->io->rw.iov != req->io->rw.fast_iov)
+		iov = req->io->rw.iov;
+	io_wq_submit_work(workptr);
+	kfree(iov);
+}
+
+static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
+			     struct iovec *iovec, struct iovec *fast_iov,
+			     struct iov_iter *iter)
+{
+	if (!req->io && io_alloc_async_ctx(req))
+		return -ENOMEM;
+
+	io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+	req->work.func = io_rw_async;
+	return 0;
 }
 
 static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
@@ -1806,7 +1826,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 			kiocb_done(kiocb, ret2, nxt, req->in_async);
 		} else {
 copy_iov:
-			ret = io_setup_async_io(req, io_size, iovec,
+			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
 			if (ret)
 				goto out_free;
@@ -1814,6 +1834,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 		}
 	}
 out_free:
-	kfree(iovec);
+	if (!io_wq_current_is_worker())
+		kfree(iovec);
 	return ret;
 }
@@ -1900,7 +1921,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 			kiocb_done(kiocb, ret2, nxt, req->in_async);
 		} else {
 copy_iov:
-			ret = io_setup_async_io(req, io_size, iovec,
+			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
 			if (ret)
 				goto out_free;
@@ -1908,6 +1929,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 		}
 	}
 out_free:
-	kfree(iovec);
+	if (!io_wq_current_is_worker())
+		kfree(iovec);
 	return ret;
 }
@@ -2021,6 +2043,19 @@ static int io_sync_file_range(struct io_kiocb *req,
 	return 0;
 }
 
+#if defined(CONFIG_NET)
+static void io_sendrecv_async(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+	struct iovec *iov = NULL;
+
+	if (req->io->rw.iov != req->io->rw.fast_iov)
+		iov = req->io->msg.iov;
+	io_wq_submit_work(workptr);
+	kfree(iov);
+}
+#endif
+
 static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
 {
 #if defined(CONFIG_NET)
@@ -2050,7 +2085,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	sock = sock_from_file(req->file, &ret);
 	if (sock) {
-		struct io_async_ctx io, *copy;
+		struct io_async_ctx io;
 		struct sockaddr_storage addr;
 		unsigned flags;
 
@@ -2077,15 +2112,12 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 		if (force_nonblock && ret == -EAGAIN) {
-			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
-			if (!copy) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			memcpy(&copy->msg, &io.msg, sizeof(copy->msg));
-			req->io = copy;
-			memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
-			req->sqe = &req->io->sqe;
+			if (req->io)
+				return -EAGAIN;
+			if (io_alloc_async_ctx(req))
+				return -ENOMEM;
+			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
+			req->work.func = io_sendrecv_async;
 			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
@@ -2093,7 +2125,7 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 
 out:
-	if (kmsg && kmsg->iov != kmsg->fast_iov)
+	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
 		kfree(kmsg->iov);
 	io_cqring_add_event(req, ret);
 	if (ret < 0)
@@ -2136,7 +2168,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	sock = sock_from_file(req->file, &ret);
 	if (sock) {
 		struct user_msghdr __user *msg;
-		struct io_async_ctx io, *copy;
+		struct io_async_ctx io;
 		struct sockaddr_storage addr;
 		unsigned flags;
 
@@ -2165,15 +2197,12 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
 		if (force_nonblock && ret == -EAGAIN) {
-			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
-			if (!copy) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			memcpy(copy, &io, sizeof(*copy));
-			req->io = copy;
-			memcpy(&req->io->sqe, req->sqe, sizeof(*req->sqe));
-			req->sqe = &req->io->sqe;
+			if (req->io)
+				return -EAGAIN;
+			if (io_alloc_async_ctx(req))
+				return -ENOMEM;
+			memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
+			req->work.func = io_sendrecv_async;
 			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
@@ -2181,7 +2210,7 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 
 out:
-	if (kmsg && kmsg->iov != kmsg->fast_iov)
+	if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
 		kfree(kmsg->iov);
 	io_cqring_add_event(req, ret);
 	if (ret < 0)
@@ -2272,15 +2301,13 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	ret = __sys_connect_file(req->file, &io->connect.address, addr_len,
 					file_flags);
 	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
-		io = kmalloc(sizeof(*io), GFP_KERNEL);
-		if (!io) {
+		if (req->io)
+			return -EAGAIN;
+		if (io_alloc_async_ctx(req)) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		memcpy(&io->connect, &__io.connect, sizeof(io->connect));
-		req->io = io;
-		memcpy(&io->sqe, req->sqe, sizeof(*req->sqe));
-		req->sqe = &io->sqe;
+		memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
 		return -EAGAIN;
 	}
 	if (ret == -ERESTARTSYS)
@@ -2511,7 +2538,6 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (!poll->file)
 		return -EBADF;
 
-	req->io = NULL;
 	INIT_IO_WORK(&req->work, io_poll_complete_work);
 	events = READ_ONCE(sqe->poll_events);
 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
@@ -2692,7 +2718,6 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
 	data->mode = HRTIMER_MODE_REL;
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
 
-	req->io = io;
 	return 0;
 }
@@ -2701,23 +2726,17 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	unsigned count;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_timeout_data *data;
-	struct io_async_ctx *io;
 	struct list_head *entry;
 	unsigned span = 0;
+	int ret;
 
-	io = req->io;
-	if (!io) {
-		int ret;
-
-		io = kmalloc(sizeof(*io), GFP_KERNEL);
-		if (!io)
+	if (!req->io) {
+		if (io_alloc_async_ctx(req))
 			return -ENOMEM;
-		ret = io_timeout_prep(req, io, false);
-		if (ret) {
-			kfree(io);
+		ret = io_timeout_prep(req, req->io, false);
+		if (ret)
 			return ret;
-		}
 	}
 
 	data = &req->io->timeout;
 	/*
@@ -2858,23 +2877,35 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_req_defer_prep(struct io_kiocb *req)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+	struct io_async_ctx *io = req->io;
 	struct iov_iter iter;
 	ssize_t ret;
 
-	memcpy(&io->sqe, req->sqe, sizeof(io->sqe));
-	req->sqe = &io->sqe;
-
 	switch (io->sqe.opcode) {
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
+		/* ensure prep does right import */
+		req->io = NULL;
 		ret = io_read_prep(req, &iovec, &iter, true);
+		req->io = io;
+		if (ret < 0)
+			break;
+		io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
+		ret = 0;
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
+		/* ensure prep does right import */
+		req->io = NULL;
 		ret = io_write_prep(req, &iovec, &iter, true);
+		req->io = io;
+		if (ret < 0)
+			break;
+		io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
+		ret = 0;
 		break;
 	case IORING_OP_SENDMSG:
 		ret = io_sendmsg_prep(req, io);
@@ -2886,41 +2917,34 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
 		ret = io_connect_prep(req, io);
 		break;
 	case IORING_OP_TIMEOUT:
-		return io_timeout_prep(req, io, false);
+		ret = io_timeout_prep(req, io, false);
+		break;
 	case IORING_OP_LINK_TIMEOUT:
-		return io_timeout_prep(req, io, true);
+		ret = io_timeout_prep(req, io, true);
+		break;
 	default:
-		req->io = io;
-		return 0;
+		ret = 0;
+		break;
 	}
 
-	if (ret < 0)
-		return ret;
-
-	req->io = io;
-	io_req_map_io(req, ret, iovec, inline_vecs, &iter);
-	return 0;
+	return ret;
 }
 
 static int io_req_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_async_ctx *io;
 	int ret;
 
 	/* Still need defer if there is pending req in defer list. */
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
 		return 0;
 
-	io = kmalloc(sizeof(*io), GFP_KERNEL);
-	if (!io)
+	if (io_alloc_async_ctx(req))
 		return -EAGAIN;
 
-	ret = io_req_defer_prep(req, io);
-	if (ret < 0) {
-		kfree(io);
+	ret = io_req_defer_prep(req);
+	if (ret < 0)
 		return ret;
-	}
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -3366,7 +3390,6 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 	 */
 	if (*link) {
 		struct io_kiocb *prev = *link;
-		struct io_async_ctx *io;
 
 		if (req->sqe->flags & IOSQE_IO_DRAIN)
 			(*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
@@ -3374,15 +3397,13 @@ static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
 		if (req->sqe->flags & IOSQE_IO_HARDLINK)
 			req->flags |= REQ_F_HARDLINK;
 
-		io = kmalloc(sizeof(*io), GFP_KERNEL);
-		if (!io) {
+		if (io_alloc_async_ctx(req)) {
 			ret = -EAGAIN;
 			goto err_req;
 		}
 
-		ret = io_req_defer_prep(req, io);
+		ret = io_req_defer_prep(req);
 		if (ret) {
-			kfree(io);
 			/* fail even hard links since we don't submit */
 			prev->flags |= REQ_F_FAIL_LINK;
 			goto err_req;
...
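
A design note on the freeing side: the new io_rw_async() and io_sendrecv_async() handlers take over req->work.func, let io_wq_submit_work() run the request, and only then kfree() a heap-allocated iovec. Ownership of that iovec thus travels with the request into the io-wq worker, which is why the inline completion paths above only free iovecs when !io_wq_current_is_worker() — an unconditional kfree() there would double-free once a worker had picked up the request.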