Commit 0a78a376 authored by Linus Torvalds

Merge tag 'for-6.1/io_uring-2022-10-03' of git://git.kernel.dk/linux

Pull io_uring updates from Jens Axboe:

 - Add support for more directly managed task_work running.

   This is beneficial for real world applications that end up issuing
   lots of system calls as part of handling work. Normal task_work will
   always execute as we transition in and out of the kernel, even for
   "unrelated" system calls. It's more efficient to defer the handling
   of io_uring's deferred work until the application wants it to be run,
   generally in batches (a hedged liburing sketch of this setup follows
   this summary).

   As part of ongoing work to write an io_uring network backend for
   Thrift, this has been shown to greatly improve performance. (Dylan)

 - Add IOPOLL support for passthrough (Kanchan)

 - Improvements and fixes to the send zero-copy support (Pavel)

 - Partial IO handling fixes (Pavel)

 - CQE ordering fixes around CQ ring overflow (Pavel)

 - Support sendto() for non-zc as well (Pavel)

 - Support sendmsg for zerocopy (Pavel); a second hedged sketch, just
   before the diff, shows the completion flow

 - Networking iov_iter fix (Stefan)

 - Misc fixes and cleanups (Pavel, me)
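
For illustration only (not part of this pull request): a minimal liburing sketch of the deferred task_work setup described in the first item, assuming liburing 2.3+ headers that define IORING_SETUP_DEFER_TASKRUN and IORING_SETUP_SINGLE_ISSUER, and assuming the kernel requires those two flags to be used together. The ring's deferred work then runs when the submitter waits for completions rather than on every syscall return.

#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p = { 0 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* single-issuer + deferred task_work: run io_uring's work in batches */
	p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;	/* pre-6.1 kernels reject this combination */

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);

	/*
	 * With DEFER_TASKRUN the deferred work runs here, when the task
	 * asks for completions, not on every transition out of the kernel.
	 */
	io_uring_submit_and_wait(&ring, 1);

	if (io_uring_peek_cqe(&ring, &cqe) == 0)
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}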

* tag 'for-6.1/io_uring-2022-10-03' of git://git.kernel.dk/linux: (56 commits)
  io_uring/net: fix notif cqe reordering
  io_uring/net: don't update msg_name if not provided
  io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
  io_uring/rw: defer fsnotify calls to task context
  io_uring/net: fix fast_iov assignment in io_setup_async_msg()
  io_uring/net: fix non-zc send with address
  io_uring/net: don't skip notifs for failed requests
  io_uring/rw: don't lose short results on io_setup_async_rw()
  io_uring/rw: fix unexpected link breakage
  io_uring/net: fix cleanup double free free_iov init
  io_uring: fix CQE reordering
  io_uring/net: fix UAF in io_sendrecv_fail()
  selftest/net: adjust io_uring sendzc notif handling
  io_uring: ensure local task_work marks task as running
  io_uring/net: zerocopy sendmsg
  io_uring/net: combine fail handlers
  io_uring/net: rename io_sendzc()
  io_uring/net: support non-zerocopy sendto
  io_uring/net: refactor io_setup_async_addr
  io_uring/net: don't lose partial send_zc on fail
  ...
parents 188943a1 108893dd
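
A second illustrative sketch (again not from this pull; the helper name send_one_zc is invented here) of the zero-copy sendmsg flow, assuming the liburing 2.3+ helper io_uring_prep_sendmsg_zc() and an already-connected socket. A zero-copy send posts a completion CQE with IORING_CQE_F_MORE set and, later, a notification CQE carrying IORING_CQE_F_NOTIF once the kernel has released the buffer, which the selftest changes below also exercise.

#include <liburing.h>
#include <sys/socket.h>
#include <stdbool.h>

/* send one msghdr with IORING_OP_SENDMSG_ZC and reap both CQEs */
static int send_one_zc(struct io_uring *ring, int fd, struct msghdr *msg)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	bool more;
	int res;

	io_uring_prep_sendmsg_zc(sqe, fd, msg, 0);
	io_uring_submit(ring);

	/* first CQE: the send result; F_MORE means a notification follows */
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	res = cqe->res;
	more = cqe->flags & IORING_CQE_F_MORE;
	io_uring_cqe_seen(ring, cqe);
	if (res < 0)
		return res;

	if (more) {
		/* second CQE (IORING_CQE_F_NOTIF): buffer may be reused now */
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		io_uring_cqe_seen(ring, cqe);
	}
	return res;
}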
......@@ -1233,7 +1233,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
complete(&wait->done);
}
static bool blk_rq_is_poll(struct request *rq)
bool blk_rq_is_poll(struct request *rq)
{
if (!rq->mq_hctx)
return false;
......@@ -1243,6 +1243,7 @@ static bool blk_rq_is_poll(struct request *rq)
return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
......
......@@ -3976,6 +3976,7 @@ static const struct file_operations nvme_ns_chr_fops = {
.unlocked_ioctl = nvme_ns_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_chr_uring_cmd,
.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_cdev(struct nvme_ns *ns)
......
......@@ -391,10 +391,18 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
/* extract bio before reusing the same field for request */
struct bio *bio = pdu->bio;
void *cookie = READ_ONCE(ioucmd->cookie);
pdu->req = req;
req->bio = bio;
/* this takes care of moving rest of completion-work to task context */
/*
* For iopoll, complete it directly.
* Otherwise, move the completion to task work.
*/
if (cookie != NULL && blk_rq_is_poll(req))
nvme_uring_task_cb(ioucmd);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
}
......@@ -445,7 +453,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
rq_flags = REQ_NOWAIT;
blk_flags = BLK_MQ_REQ_NOWAIT;
}
if (issue_flags & IO_URING_F_IOPOLL)
rq_flags |= REQ_POLLED;
retry:
req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
d.data_len, nvme_to_user_ptr(d.metadata),
d.metadata_len, 0, &meta, d.timeout_ms ?
......@@ -456,6 +467,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
req->end_io = nvme_uring_cmd_end_io;
req->end_io_data = ioucmd;
if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
if (unlikely(!req->bio)) {
/* we can't poll this, so alloc regular req instead */
blk_mq_free_request(req);
rq_flags &= ~REQ_POLLED;
goto retry;
} else {
WRITE_ONCE(ioucmd->cookie, req->bio);
req->bio->bi_opf |= REQ_POLLED;
}
}
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
pdu->meta = meta;
......@@ -559,9 +581,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
/* IOPOLL not supported yet */
if (issue_flags & IO_URING_F_IOPOLL)
return -EOPNOTSUPP;
/* NVMe passthrough requires big SQE/CQE support */
if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
......@@ -604,6 +623,25 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
struct bio *bio;
int ret = 0;
struct nvme_ns *ns;
struct request_queue *q;
rcu_read_lock();
bio = READ_ONCE(ioucmd->cookie);
ns = container_of(file_inode(ioucmd->file)->i_cdev,
struct nvme_ns, cdev);
q = ns->queue;
if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
ret = bio_poll(bio, iob, poll_flags);
rcu_read_unlock();
return ret;
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
......@@ -685,6 +723,31 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
int srcu_idx = srcu_read_lock(&head->srcu);
struct nvme_ns *ns = nvme_find_path(head);
struct bio *bio;
int ret = 0;
struct request_queue *q;
if (ns) {
rcu_read_lock();
bio = READ_ONCE(ioucmd->cookie);
q = ns->queue;
if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
&& bio->bi_bdev)
ret = bio_poll(bio, iob, poll_flags);
rcu_read_unlock();
}
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
......@@ -692,6 +755,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
struct nvme_ctrl *ctrl = ioucmd->file->private_data;
int ret;
/* IOPOLL not supported yet */
if (issue_flags & IO_URING_F_IOPOLL)
return -EOPNOTSUPP;
ret = nvme_uring_cmd_checks(issue_flags);
if (ret)
return ret;
......
......@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
.unlocked_ioctl = nvme_ns_head_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_head_chr_uring_cmd,
.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
......
......@@ -821,6 +821,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
......
......@@ -69,17 +69,17 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
* it returns false, the eventfd_signal() call should be deferred to a
* safe context.
*/
if (WARN_ON_ONCE(current->in_eventfd_signal))
if (WARN_ON_ONCE(current->in_eventfd))
return 0;
spin_lock_irqsave(&ctx->wqh.lock, flags);
current->in_eventfd_signal = 1;
current->in_eventfd = 1;
if (ULLONG_MAX - ctx->count < n)
n = ULLONG_MAX - ctx->count;
ctx->count += n;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN);
current->in_eventfd_signal = 0;
current->in_eventfd = 0;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return n;
......@@ -253,8 +253,10 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
__set_current_state(TASK_RUNNING);
}
eventfd_ctx_do_read(ctx, &ucnt);
current->in_eventfd = 1;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
current->in_eventfd = 0;
spin_unlock_irq(&ctx->wqh.lock);
if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
return -EFAULT;
......@@ -301,8 +303,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
}
if (likely(res > 0)) {
ctx->count += ucnt;
current->in_eventfd = 1;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN);
current->in_eventfd = 0;
}
spin_unlock_irq(&ctx->wqh.lock);
......
......@@ -980,6 +980,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);
struct req_iterator {
struct bvec_iter iter;
......
......@@ -46,7 +46,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
static inline bool eventfd_signal_allowed(void)
{
return !current->in_eventfd_signal;
return !current->in_eventfd;
}
#else /* CONFIG_EVENTFD */
......
......@@ -2133,6 +2133,8 @@ struct file_operations {
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
unsigned int poll_flags);
} __randomize_layout;
struct inode_operations {
......
......@@ -20,8 +20,12 @@ enum io_uring_cmd_flags {
struct io_uring_cmd {
struct file *file;
const void *cmd;
union {
/* callback to defer completions to task context */
void (*task_work_cb)(struct io_uring_cmd *cmd);
/* used for polled completion */
void *cookie;
};
u32 cmd_op;
u32 pad;
u8 pdu[32]; /* available inline for free use */
......
......@@ -184,6 +184,8 @@ struct io_ev_fd {
struct eventfd_ctx *cq_ev_fd;
unsigned int eventfd_async: 1;
struct rcu_head rcu;
atomic_t refs;
atomic_t ops;
};
struct io_alloc_cache {
......@@ -301,6 +303,8 @@ struct io_ring_ctx {
struct io_hash_table cancel_table;
bool poll_multi_queue;
struct llist_head work_llist;
struct list_head io_buffers_comp;
} ____cacheline_aligned_in_smp;
......
......@@ -936,7 +936,7 @@ struct task_struct {
#endif
#ifdef CONFIG_EVENTFD
/* Recursion prevention for eventfd_signal() */
unsigned in_eventfd_signal:1;
unsigned in_eventfd:1;
#endif
#ifdef CONFIG_IOMMU_SVA
unsigned pasid_activated:1;
......
......@@ -655,6 +655,35 @@ TRACE_EVENT(io_uring_short_write,
__entry->wanted, __entry->got)
);
/*
* io_uring_local_work_run - ran ring local task work
*
* @ctx: pointer to an io_ring_ctx
* @count: how many functions it ran
* @loops: how many loops it ran
*
*/
TRACE_EVENT(io_uring_local_work_run,
TP_PROTO(void *ctx, int count, unsigned int loops),
TP_ARGS(ctx, count, loops),
TP_STRUCT__entry (
__field(void *, ctx )
__field(int, count )
__field(unsigned int, loops )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->count = count;
__entry->loops = loops;
),
TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);
#endif /* _TRACE_IO_URING_H */
/* This part must be outside protection */
......
......@@ -157,6 +157,13 @@ enum {
*/
#define IORING_SETUP_SINGLE_ISSUER (1U << 12)
/*
* Defer running task work to get events.
* Rather than running bits of task work whenever the task transitions
* in and out of the kernel, try to do it just before it is needed.
*/
#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
......@@ -206,6 +213,7 @@ enum io_uring_op {
IORING_OP_SOCKET,
IORING_OP_URING_CMD,
IORING_OP_SEND_ZC,
IORING_OP_SENDMSG_ZC,
/* this goes last, obviously */
IORING_OP_LAST,
......
......@@ -292,7 +292,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
break;
mutex_unlock(&ctx->uring_lock);
ret = io_run_task_work_sig();
ret = io_run_task_work_sig(ctx);
if (ret < 0) {
mutex_lock(&ctx->uring_lock);
break;
......
......@@ -60,13 +60,15 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
unsigned int cq_head = READ_ONCE(r->cq.head);
unsigned int cq_tail = READ_ONCE(r->cq.tail);
unsigned int cq_shift = 0;
unsigned int sq_shift = 0;
unsigned int sq_entries, cq_entries;
bool has_lock;
bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
unsigned int i;
if (is_cqe32)
if (ctx->flags & IORING_SETUP_CQE32)
cq_shift = 1;
if (ctx->flags & IORING_SETUP_SQE128)
sq_shift = 1;
/*
* we may get imprecise sqe and cqe info if uring is actively running
......@@ -82,19 +84,36 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
seq_printf(m, "CqHead:\t%u\n", cq_head);
seq_printf(m, "CqTail:\t%u\n", cq_tail);
seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
for (i = 0; i < sq_entries; i++) {
unsigned int entry = i + sq_head;
unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
struct io_uring_sqe *sqe;
unsigned int sq_idx;
sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
if (sq_idx > sq_mask)
continue;
sqe = &ctx->sq_sqes[sq_idx];
seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
sq_idx, sqe->opcode, sqe->fd, sqe->flags,
sqe->user_data);
sqe = &ctx->sq_sqes[sq_idx << 1];
seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
"addr:0x%llx, rw_flags:0x%x, buf_index:%d "
"user_data:%llu",
sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
sqe->flags, (unsigned long long) sqe->off,
(unsigned long long) sqe->addr, sqe->rw_flags,
sqe->buf_index, sqe->user_data);
if (sq_shift) {
u64 *sqeb = (void *) (sqe + 1);
int size = sizeof(struct io_uring_sqe) / sizeof(u64);
int j;
for (j = 0; j < size; j++) {
seq_printf(m, ", e%d:0x%llx", j,
(unsigned long long) *sqeb);
sqeb++;
}
}
seq_printf(m, "\n");
}
seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
......@@ -102,16 +121,13 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
unsigned int entry = i + cq_head;
struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
if (!is_cqe32) {
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags);
} else {
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
"extra1:%llu, extra2:%llu\n",
entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
}
if (cq_shift)
seq_printf(m, ", extra1:%llu, extra2:%llu\n",
cqe->big_cqe[0], cqe->big_cqe[1]);
seq_printf(m, "\n");
}
/*
......
......@@ -24,9 +24,11 @@ enum {
IOU_STOP_MULTISHOT = -ECANCELED,
};
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
......@@ -91,7 +93,8 @@ static inline void io_cq_lock(struct io_ring_ctx *ctx)
void io_cq_unlock_post(struct io_ring_ctx *ctx);
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
bool overflow)
{
if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
struct io_uring_cqe *cqe = ctx->cqe_cached;
......@@ -103,7 +106,12 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
return cqe;
}
return __io_get_cqe(ctx);
return __io_get_cqe(ctx, overflow);
}
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
return io_get_cqe_overflow(ctx, false);
}
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
......@@ -221,17 +229,43 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
static inline bool io_run_task_work(void)
static inline int io_run_task_work(void)
{
if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
__set_current_state(TASK_RUNNING);
if (task_work_pending(current)) {
if (test_thread_flag(TIF_NOTIFY_SIGNAL))
clear_notify_signal();
if (task_work_pending(current))
__set_current_state(TASK_RUNNING);
task_work_run();
return true;
return 1;
}
return false;
return 0;
}
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
!wq_list_empty(&ctx->work_llist);
}
static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
int ret = 0;
int ret2;
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
ret = io_run_local_work(ctx);
/* want to run this after in case more is added */
ret2 = io_run_task_work();
/* Prefer to propagate an error over reporting how many items were run,
 * but still make sure to run the task work if requested.
 */
if (ret >= 0)
ret += ret2;
return ret;
}
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
......@@ -301,4 +335,10 @@ static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
return container_of(node, struct io_kiocb, comp_list);
}
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
ctx->submitter_task == current);
}
#endif
......@@ -86,18 +86,6 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
/*
* READV uses fields in `struct io_rw` (len/addr) to stash the selected
* buffer data. However if that buffer is recycled the original request
* data stored in addr is lost. Therefore forbid recycling for now.
*/
if (req->opcode == IORING_OP_READV) {
if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
req->buf_list->head++;
req->buf_list = NULL;
}
return;
}
if (req->flags & REQ_F_BUFFER_SELECTED)
io_kbuf_recycle_legacy(req, issue_flags);
if (req->flags & REQ_F_BUFFER_RING)
......
......@@ -31,18 +31,21 @@ struct io_async_connect {
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep_async(struct io_kiocb *req);
int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_send(struct io_kiocb *req, unsigned int issue_flags);
int io_send_prep_async(struct io_kiocb *req);
int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);
void io_sendrecv_fail(struct io_kiocb *req);
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);
......@@ -53,9 +56,10 @@ int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_sendzc_cleanup(struct io_kiocb *req);
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);
void io_netmsg_cache_free(struct io_cache_entry *entry);
#else
......
......@@ -69,6 +69,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_read,
.prep_async = io_readv_prep_async,
.cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_WRITEV] = {
.needs_file = 1,
......@@ -85,6 +86,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_write,
.prep_async = io_writev_prep_async,
.cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_FSYNC] = {
.needs_file = 1,
......@@ -105,6 +107,7 @@ const struct io_op_def io_op_defs[] = {
.name = "READ_FIXED",
.prep = io_prep_rw,
.issue = io_read,
.fail = io_rw_fail,
},
[IORING_OP_WRITE_FIXED] = {
.needs_file = 1,
......@@ -119,6 +122,7 @@ const struct io_op_def io_op_defs[] = {
.name = "WRITE_FIXED",
.prep = io_prep_rw,
.issue = io_write,
.fail = io_rw_fail,
},
[IORING_OP_POLL_ADD] = {
.needs_file = 1,
......@@ -146,6 +150,7 @@ const struct io_op_def io_op_defs[] = {
.unbound_nonreg_file = 1,
.pollout = 1,
.ioprio = 1,
.manual_alloc = 1,
.name = "SENDMSG",
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
......@@ -153,6 +158,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_sendmsg,
.prep_async = io_sendmsg_prep_async,
.cleanup = io_sendmsg_recvmsg_cleanup,
.fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
......@@ -163,6 +169,7 @@ const struct io_op_def io_op_defs[] = {
.pollin = 1,
.buffer_select = 1,
.ioprio = 1,
.manual_alloc = 1,
.name = "RECVMSG",
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
......@@ -170,6 +177,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_recvmsg,
.prep_async = io_recvmsg_prep_async,
.cleanup = io_sendmsg_recvmsg_cleanup,
.fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
......@@ -273,6 +281,7 @@ const struct io_op_def io_op_defs[] = {
.name = "READ",
.prep = io_prep_rw,
.issue = io_read,
.fail = io_rw_fail,
},
[IORING_OP_WRITE] = {
.needs_file = 1,
......@@ -287,6 +296,7 @@ const struct io_op_def io_op_defs[] = {
.name = "WRITE",
.prep = io_prep_rw,
.issue = io_write,
.fail = io_rw_fail,
},
[IORING_OP_FADVISE] = {
.needs_file = 1,
......@@ -306,10 +316,14 @@ const struct io_op_def io_op_defs[] = {
.pollout = 1,
.audit_skip = 1,
.ioprio = 1,
.manual_alloc = 1,
.name = "SEND",
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_sendmsg_prep,
.issue = io_send,
.fail = io_sendrecv_fail,
.prep_async = io_send_prep_async,
#else
.prep = io_eopnotsupp_prep,
#endif
......@@ -325,6 +339,7 @@ const struct io_op_def io_op_defs[] = {
#if defined(CONFIG_NET)
.prep = io_recvmsg_prep,
.issue = io_recv,
.fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
......@@ -465,6 +480,7 @@ const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.plug = 1,
.name = "URING_CMD",
.iopoll = 1,
.async_size = uring_cmd_pdu_size(1),
.prep = io_uring_cmd_prep,
.issue = io_uring_cmd,
......@@ -480,10 +496,30 @@ const struct io_op_def io_op_defs[] = {
.manual_alloc = 1,
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_sendzc_prep,
.issue = io_sendzc,
.prep_async = io_sendzc_prep_async,
.cleanup = io_sendzc_cleanup,
.prep = io_send_zc_prep,
.issue = io_send_zc,
.prep_async = io_send_prep_async,
.cleanup = io_send_zc_cleanup,
.fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
},
[IORING_OP_SENDMSG_ZC] = {
.name = "SENDMSG_ZC",
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
.audit_skip = 1,
.ioprio = 1,
.manual_alloc = 1,
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_send_zc_prep,
.issue = io_sendmsg_zc,
.prep_async = io_sendmsg_prep_async,
.cleanup = io_send_zc_cleanup,
.fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
......
......@@ -36,6 +36,7 @@ struct io_op_def {
int (*issue)(struct io_kiocb *, unsigned int);
int (*prep_async)(struct io_kiocb *);
void (*cleanup)(struct io_kiocb *);
void (*fail)(struct io_kiocb *);
};
extern const struct io_op_def io_op_defs[];
......
......@@ -341,7 +341,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
flush_delayed_work(&ctx->rsrc_put_work);
reinit_completion(&data->done);
ret = io_run_task_work_sig();
ret = io_run_task_work_sig(ctx);
mutex_lock(&ctx->uring_lock);
} while (ret >= 0);
data->quiesce = false;
......
......@@ -33,6 +33,46 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
return req->flags & REQ_F_SUPPORT_NOWAIT;
}
#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
struct compat_iovec __user *uiov;
compat_ssize_t clen;
uiov = u64_to_user_ptr(rw->addr);
if (!access_ok(uiov, sizeof(*uiov)))
return -EFAULT;
if (__get_user(clen, &uiov->iov_len))
return -EFAULT;
if (clen < 0)
return -EINVAL;
rw->len = clen;
return 0;
}
#endif
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
struct iovec __user *uiov;
struct iovec iov;
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->len != 1)
return -EINVAL;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
return io_iov_compat_buffer_select_prep(rw);
#endif
uiov = u64_to_user_ptr(rw->addr);
if (copy_from_user(&iov, uiov, sizeof(*uiov)))
return -EFAULT;
rw->len = iov.iov_len;
return 0;
}
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
......@@ -69,6 +109,16 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
rw->addr = READ_ONCE(sqe->addr);
rw->len = READ_ONCE(sqe->len);
rw->flags = READ_ONCE(sqe->rw_flags);
/* Have to do this validation here, because by the time io_read() runs,
 * rw->len might have changed due to buffer selection.
 */
if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
ret = io_iov_buffer_select_prep(req);
if (ret)
return ret;
}
return 0;
}
......@@ -186,14 +236,6 @@ static void kiocb_end_write(struct io_kiocb *req)
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
kiocb_end_write(req);
fsnotify_modify(req->file);
} else {
fsnotify_access(req->file);
}
if (unlikely(res != req->cqe.res)) {
if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
io_rw_should_reissue(req)) {
......@@ -220,6 +262,20 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
return res;
}
static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
kiocb_end_write(req);
fsnotify_modify(req->file);
} else {
fsnotify_access(req->file);
}
io_req_task_complete(req, locked);
}
static void io_complete_rw(struct kiocb *kiocb, long res)
{
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
......@@ -228,7 +284,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
if (__io_complete_rw_common(req, res))
return;
io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_task_complete;
req->io_task_work.func = io_req_rw_complete;
io_req_task_work_add(req);
}
......@@ -279,79 +335,6 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
return IOU_ISSUE_SKIP_COMPLETE;
}
#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct compat_iovec __user *uiov;
compat_ssize_t clen;
void __user *buf;
size_t len;
uiov = u64_to_user_ptr(rw->addr);
if (!access_ok(uiov, sizeof(*uiov)))
return -EFAULT;
if (__get_user(clen, &uiov->iov_len))
return -EFAULT;
if (clen < 0)
return -EINVAL;
len = clen;
buf = io_buffer_select(req, &len, issue_flags);
if (!buf)
return -ENOBUFS;
rw->addr = (unsigned long) buf;
iov[0].iov_base = buf;
rw->len = iov[0].iov_len = (compat_size_t) len;
return 0;
}
#endif
static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
void __user *buf;
ssize_t len;
if (copy_from_user(iov, uiov, sizeof(*uiov)))
return -EFAULT;
len = iov[0].iov_len;
if (len < 0)
return -EINVAL;
buf = io_buffer_select(req, &len, issue_flags);
if (!buf)
return -ENOBUFS;
rw->addr = (unsigned long) buf;
iov[0].iov_base = buf;
rw->len = iov[0].iov_len = len;
return 0;
}
static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
iov[0].iov_base = u64_to_user_ptr(rw->addr);
iov[0].iov_len = rw->len;
return 0;
}
if (rw->len != 1)
return -EINVAL;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
return io_compat_import(req, iov, issue_flags);
#endif
return __io_iov_buffer_select(req, iov, issue_flags);
}
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
struct io_rw_state *s,
unsigned int issue_flags)
......@@ -374,7 +357,8 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
buf = u64_to_user_ptr(rw->addr);
sqe_len = rw->len;
if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
(req->flags & REQ_F_BUFFER_SELECT)) {
if (io_do_buffer_select(req)) {
buf = io_buffer_select(req, &sqe_len, issue_flags);
if (!buf)
......@@ -390,14 +374,6 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
}
iovec = s->fast_iov;
if (req->flags & REQ_F_BUFFER_SELECT) {
ret = io_iov_buffer_select(req, iovec, issue_flags);
if (ret)
return ERR_PTR(ret);
iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
return NULL;
}
ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
req->ctx->compat);
if (unlikely(ret < 0))
......@@ -794,10 +770,12 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_restore(&s->iter, &s->iter_state);
ret2 = io_setup_async_rw(req, iovec, s, true);
if (ret2)
return ret2;
iovec = NULL;
if (ret2) {
ret = ret > 0 ? ret : ret2;
goto done;
}
io = req->async_data;
s = &io->s;
/*
......@@ -823,6 +801,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
}
req->cqe.res = iov_iter_count(&s->iter);
/*
* Now retry read with the IOCB_WAITQ parts set in the iocb. If
* we get -EIOCBQUEUED, then we'll get a notification when the
......@@ -984,6 +963,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
io_cqring_wake(ctx);
}
void io_rw_fail(struct io_kiocb *req)
{
int res;
res = io_fixup_rw_res(req, req->cqe.res);
io_req_set_res(req, res, req->cqe.flags);
}
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
struct io_wq_work_node *pos, *start, *prev;
......@@ -1000,7 +987,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
wq_list_for_each(pos, start, &ctx->iopoll_list) {
struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct file *file = req->file;
int ret;
/*
......@@ -1011,7 +998,17 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (READ_ONCE(req->iopoll_completed))
break;
ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
if (req->opcode == IORING_OP_URING_CMD) {
struct io_uring_cmd *ioucmd;
ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
poll_flags);
} else {
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
}
if (unlikely(ret < 0))
return ret;
else if (ret)
......
......@@ -21,3 +21,4 @@ int io_readv_prep_async(struct io_kiocb *req);
int io_write(struct io_kiocb *req, unsigned int issue_flags);
int io_writev_prep_async(struct io_kiocb *req);
void io_readv_writev_cleanup(struct io_kiocb *req);
void io_rw_fail(struct io_kiocb *req);
......@@ -149,11 +149,10 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
nxt->link = NULL;
}
bool io_disarm_next(struct io_kiocb *req)
void io_disarm_next(struct io_kiocb *req)
__must_hold(&req->ctx->completion_lock)
{
struct io_kiocb *link = NULL;
bool posted = false;
if (req->flags & REQ_F_ARM_LTIMEOUT) {
link = req->link;
......@@ -161,7 +160,6 @@ bool io_disarm_next(struct io_kiocb *req)
if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
io_remove_next_linked(req);
io_req_tw_post_queue(link, -ECANCELED, 0);
posted = true;
}
} else if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
......@@ -169,17 +167,12 @@ bool io_disarm_next(struct io_kiocb *req)
spin_lock_irq(&ctx->timeout_lock);
link = io_disarm_linked_timeout(req);
spin_unlock_irq(&ctx->timeout_lock);
if (link) {
posted = true;
if (link)
io_req_tw_post_queue(link, -ECANCELED, 0);
}
}
if (unlikely((req->flags & REQ_F_FAIL) &&
!(req->flags & REQ_F_HARDLINK))) {
posted |= (req->link != NULL);
!(req->flags & REQ_F_HARDLINK)))
io_fail_links(req);
}
return posted;
}
struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
......
......@@ -27,7 +27,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all);
void io_queue_linked_timeout(struct io_kiocb *req);
bool io_disarm_next(struct io_kiocb *req);
void io_disarm_next(struct io_kiocb *req);
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
......
......@@ -50,6 +50,10 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
io_req_set_res(req, ret, 0);
if (req->ctx->flags & IORING_SETUP_CQE32)
io_req_set_cqe32_extra(req, res2, 0);
if (req->ctx->flags & IORING_SETUP_IOPOLL)
/* order with io_iopoll_req_issued() checking ->iopoll_completed */
smp_store_release(&req->iopoll_completed, 1);
else
__io_req_complete(req, 0);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
......@@ -97,8 +101,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32)
issue_flags |= IO_URING_F_CQE32;
if (ctx->flags & IORING_SETUP_IOPOLL)
if (ctx->flags & IORING_SETUP_IOPOLL) {
issue_flags |= IO_URING_F_IOPOLL;
req->iopoll_completed = 0;
WRITE_ONCE(ioucmd->cookie, NULL);
}
if (req_has_async_data(req))
ioucmd->cmd = req->async_data;
......
......@@ -400,7 +400,6 @@ static void do_tx(int domain, int type, int protocol)
cfg_payload_len, msg_flags);
sqe->user_data = NONZC_TAG;
} else {
compl_cqes++;
io_uring_prep_sendzc(sqe, fd, payload,
cfg_payload_len,
msg_flags, zc_flags);
......@@ -430,18 +429,23 @@ static void do_tx(int domain, int type, int protocol)
if (cqe->flags & IORING_CQE_F_NOTIF) {
if (cqe->flags & IORING_CQE_F_MORE)
error(1, -EINVAL, "invalid notif flags");
if (compl_cqes <= 0)
error(1, -EINVAL, "notification mismatch");
compl_cqes--;
i--;
} else if (cqe->res <= 0) {
if (cqe->flags & IORING_CQE_F_MORE)
error(1, cqe->res, "more with a failed send");
error(1, cqe->res, "send failed");
} else {
if (cqe->user_data == ZC_TAG &&
!(cqe->flags & IORING_CQE_F_MORE))
error(1, cqe->res, "missing more flag");
io_uring_cqe_seen(&ring);
continue;
}
if (cqe->flags & IORING_CQE_F_MORE) {
if (cqe->user_data != ZC_TAG)
error(1, cqe->res, "unexpected F_MORE");
compl_cqes++;
}
if (cqe->res >= 0) {
packets++;
bytes += cqe->res;
} else if (cqe->res != -EAGAIN) {
error(1, cqe->res, "send failed");
}
io_uring_cqe_seen(&ring);
}
......