Commit 257e84a5 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: refactor sendmsg/recvmsg iov managing

The current iov handling in recvmsg/sendmsg can be confusing. First,
establish a rule for msg->iov: either it points to an allocated iov that
has to be kfree()'d later, or it is NULL and fast_iov is used. That is
much better than the current three-state scheme (where it may also point
to fast_iov). Also rename the field to free_iov for uniformity with
read/write.
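
The practical payoff of the two-state rule is that cleanup no longer has
to compare the pointer against fast_iov; since kfree(NULL) is a no-op, a
plain kfree() (optionally behind a NULL check as a fast path) is enough.
A minimal sketch of the invariant, purely illustrative and not kernel
code (nr_segs and the array size 8 are made up for the example):

	struct iovec fast_iov[8];
	struct iovec *free_iov = NULL;		/* NULL => fast_iov is in use */
	struct iovec *iov = fast_iov;

	if (nr_segs > 8) {
		/* too many segments for the inline array: allocate */
		free_iov = kmalloc_array(nr_segs, sizeof(*free_iov), GFP_KERNEL);
		iov = free_iov;			/* non-NULL => must be kfree()'d */
	}
	/* ... fill and use iov ... */
	kfree(free_iov);			/* safe either way: kfree(NULL) is a no-op */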

Also, the fix-up of msg.msg_iter.iov after the struct io_async_msghdr
copy has so far been done in io_recvmsg()/io_sendmsg(). Move it into
io_setup_async_msg(), which is the right place for it.
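
The fix-up is needed because io_setup_async_msg() copies the on-stack
msghdr into heap-allocated async data with memcpy(): if the iterator was
using the inline fast_iov, it would otherwise keep pointing at the old
stack copy. A condensed sketch of the step that now lives in
io_setup_async_msg() (it mirrors the hunk in the diff below):

	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* free_iov == NULL means the iterator referenced kmsg->fast_iov,
	 * so repoint it at the copy's own fast_iov */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;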
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: add comment on NULL check before kfree()]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5476dfed
@@ -594,7 +594,8 @@ struct io_async_connect {
 struct io_async_msghdr {
 	struct iovec			fast_iov[UIO_FASTIOV];
-	struct iovec			*iov;
+	/* points to an allocated iov, if NULL we use fast_iov instead */
+	struct iovec			*free_iov;
 	struct sockaddr __user		*uaddr;
 	struct msghdr			msg;
 	struct sockaddr_storage		addr;
@@ -4551,24 +4552,27 @@ static int io_setup_async_msg(struct io_kiocb *req,
 	if (async_msg)
 		return -EAGAIN;
 	if (io_alloc_async_data(req)) {
-		if (kmsg->iov != kmsg->fast_iov)
-			kfree(kmsg->iov);
+		kfree(kmsg->free_iov);
 		return -ENOMEM;
 	}
 	async_msg = req->async_data;
 	req->flags |= REQ_F_NEED_CLEANUP;
 	memcpy(async_msg, kmsg, sizeof(*kmsg));
 	async_msg->msg.msg_name = &async_msg->addr;
+	/* if were using fast_iov, set it to the new one */
+	if (!async_msg->free_iov)
+		async_msg->msg.msg_iter.iov = async_msg->fast_iov;
 	return -EAGAIN;
 }

 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 			       struct io_async_msghdr *iomsg)
 {
-	iomsg->iov = iomsg->fast_iov;
 	iomsg->msg.msg_name = &iomsg->addr;
+	iomsg->free_iov = iomsg->fast_iov;
 	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
-				   req->sr_msg.msg_flags, &iomsg->iov);
+				   req->sr_msg.msg_flags, &iomsg->free_iov);
 }

 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -4609,13 +4613,8 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	if (unlikely(!sock))
 		return -ENOTSOCK;

-	if (req->async_data) {
-		kmsg = req->async_data;
-		/* if iov is set, it's allocated already */
-		if (!kmsg->iov)
-			kmsg->iov = kmsg->fast_iov;
-		kmsg->msg.msg_iter.iov = kmsg->iov;
-	} else {
+	kmsg = req->async_data;
+	if (!kmsg) {
 		ret = io_sendmsg_copy_hdr(req, &iomsg);
 		if (ret)
 			return ret;
@@ -4634,8 +4633,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;

-	if (kmsg->iov != kmsg->fast_iov)
-		kfree(kmsg->iov);
+	/* fast path, check for non-NULL to avoid function call */
+	if (kmsg->free_iov)
+		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
 	if (ret < 0)
 		req_set_fail_links(req);
@@ -4704,10 +4704,11 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
 		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
 			return -EFAULT;
 		sr->len = iomsg->fast_iov[0].iov_len;
-		iomsg->iov = NULL;
+		iomsg->free_iov = NULL;
 	} else {
+		iomsg->free_iov = iomsg->fast_iov;
 		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
-				     &iomsg->iov, &iomsg->msg.msg_iter,
+				     &iomsg->free_iov, &iomsg->msg.msg_iter,
 				     false);
 		if (ret > 0)
 			ret = 0;
@@ -4746,10 +4747,11 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 		if (clen < 0)
 			return -EINVAL;
 		sr->len = clen;
-		iomsg->iov = NULL;
+		iomsg->free_iov = NULL;
 	} else {
+		iomsg->free_iov = iomsg->fast_iov;
 		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
-				     UIO_FASTIOV, &iomsg->iov,
+				     UIO_FASTIOV, &iomsg->free_iov,
 				     &iomsg->msg.msg_iter, true);
 		if (ret < 0)
 			return ret;
@@ -4763,7 +4765,6 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 			       struct io_async_msghdr *iomsg)
 {
 	iomsg->msg.msg_name = &iomsg->addr;
-	iomsg->iov = iomsg->fast_iov;

 #ifdef CONFIG_COMPAT
 	if (req->ctx->compat)
@@ -4834,13 +4835,8 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	if (unlikely(!sock))
 		return -ENOTSOCK;

-	if (req->async_data) {
-		kmsg = req->async_data;
-		/* if iov is set, it's allocated already */
-		if (!kmsg->iov)
-			kmsg->iov = kmsg->fast_iov;
-		kmsg->msg.msg_iter.iov = kmsg->iov;
-	} else {
+	kmsg = req->async_data;
+	if (!kmsg) {
 		ret = io_recvmsg_copy_hdr(req, &iomsg);
 		if (ret)
 			return ret;
@@ -4872,8 +4868,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);

-	if (kmsg->iov != kmsg->fast_iov)
-		kfree(kmsg->iov);
+	/* fast path, check for non-NULL to avoid function call */
+	if (kmsg->free_iov)
+		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
 	if (ret < 0)
 		req_set_fail_links(req);
@@ -6166,8 +6163,8 @@ static void __io_clean_op(struct io_kiocb *req)
 	case IORING_OP_RECVMSG:
 	case IORING_OP_SENDMSG: {
 		struct io_async_msghdr *io = req->async_data;
-		if (io->iov != io->fast_iov)
-			kfree(io->iov);
+
+		kfree(io->free_iov);
 		break;
 		}
 	case IORING_OP_SPLICE: