Commit fc68fcda authored by Jens Axboe

io_uring/rw: add support for IORING_OP_READ_MULTISHOT

This behaves like IORING_OP_READ, except:

1) It only supports pollable files (e.g. pipes, sockets, etc). Note that
   for sockets, you probably want to use recv/recvmsg with multishot
   instead.

2) It supports multishot mode, meaning it will repeatedly trigger a
   read and fill a buffer when data is available. This allows usage
   similar to recv/recvmsg, but on non-sockets: a single request will
   repeatedly post a CQE whenever data is read from the file.

3) Because of #2, it must be used with provided buffers. This is
   uniformly true across any request type that supports multishot and
   transfers data. The reason is that it's obviously not possible to
   pass in a single buffer for the data, as multiple reads may well
   complete before the application has had a chance to process previous
   CQEs and the data they carry (see the usage sketch after the commit
   metadata below).
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d2d778fb
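For context (not part of the patch), here is a rough sketch of how an application might drive the new opcode from userspace with liburing: register a provided-buffer ring for a buffer group, then arm a single multishot read against a pollable fd (a pipe in this sketch). The buffer group id, buffer count, and sizes are arbitrary example values, the SQE is filled by hand via io_uring_prep_rw() since only the opcode itself is added by this patch, and error handling is abbreviated.

/*
 * Hypothetical usage sketch, assuming liburing >= 2.4 (for buffer rings) and
 * a kernel that defines IORING_OP_READ_MULTISHOT. Values are examples only.
 */
#include <liburing.h>
#include <stdlib.h>
#include <unistd.h>

#define BGID		1	/* arbitrary provided-buffer group id */
#define NR_BUFS		8
#define BUF_SIZE	4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	char *bufs;
	int fds[2], ret, i;

	if (pipe(fds) < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Provided buffers: the kernel picks one for each completed read */
	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &ret);
	bufs = malloc(NR_BUFS * BUF_SIZE);
	if (!br || !bufs)
		return 1;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* One SQE arms the multishot read; it stays armed across completions */
	sqe = io_uring_get_sqe(&ring);
	/* len 0: each read is capped by the size of the picked buffer */
	io_uring_prep_rw(IORING_OP_READ_MULTISHOT, sqe, fds[0], NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* required: provided buffers */
	sqe->buf_group = BGID;
	io_uring_submit(&ring);

	/* CQEs are then reaped as data arrives on fds[0]; see the loop
	 * sketched after the io_read_mshot() hunk below. */
	return 0;
}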
@@ -240,6 +240,7 @@ enum io_uring_op {
	IORING_OP_URING_CMD,
	IORING_OP_SEND_ZC,
	IORING_OP_SENDMSG_ZC,
	IORING_OP_READ_MULTISHOT,

	/* this goes last, obviously */
	IORING_OP_LAST,
...
@@ -430,9 +430,17 @@ const struct io_issue_def io_issue_defs[] = {
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_READ_MULTISHOT] = {
		.needs_file = 1,
		.unbound_nonreg_file = 1,
		.pollin = 1,
		.buffer_select = 1,
		.audit_skip = 1,
		.prep = io_read_mshot_prep,
		.issue = io_read_mshot,
	},
};

const struct io_cold_def io_cold_defs[] = {
	[IORING_OP_NOP] = {
		.name = "NOP",
@@ -650,6 +658,9 @@ const struct io_cold_def io_cold_defs[] = {
		.fail = io_sendrecv_fail,
#endif
	},
	[IORING_OP_READ_MULTISHOT] = {
		.name = "READ_MULTISHOT",
	},
};

const char *io_uring_get_opcode(u8 opcode)
...
@@ -123,6 +123,22 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	return 0;
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
@@ -869,6 +885,57 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!file_can_poll(req->file))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		io_kbuf_recycle(req, issue_flags);
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);

		if (io_fill_cqe_req_aux(req,
					issue_flags & IO_URING_F_COMPLETE_DEFER,
					ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
...
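On the userspace side of the termination logic above, the application keeps reaping CQEs and treats a completion without IORING_CQE_F_MORE as the end of the multishot request (an error, EOF, or the CQ-ring overflow handled in io_read_mshot()), at which point it must re-arm if it wants more data. A continuation of the hypothetical sketch from above, reusing its ring, br, bufs, and size constants:

	/* Reap completions from the armed multishot read */
	for (;;) {
		struct io_uring_cqe *cqe;
		unsigned int bid;

		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;

		if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
			bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
			/* ... consume cqe->res bytes from bufs + bid * BUF_SIZE ... */

			/* hand the buffer back to the ring for later reads */
			io_uring_buf_ring_add(br, bufs + bid * BUF_SIZE, BUF_SIZE,
					      bid, io_uring_buf_ring_mask(NR_BUFS), 0);
			io_uring_buf_ring_advance(br, 1);
		}

		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* multishot terminated: re-arm a new SQE or bail out */
			io_uring_cqe_seen(&ring, cqe);
			break;
		}
		io_uring_cqe_seen(&ring, cqe);
	}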
@@ -23,3 +23,5 @@ int io_writev_prep_async(struct io_kiocb *req);
void io_readv_writev_cleanup(struct io_kiocb *req);
void io_rw_fail(struct io_kiocb *req);
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts);
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);