Commit 26f0505a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: rethink def->needs_async_data

needs_async_data controls allocation of async_data and is used in two
cases: 1) when async setup requires it (by io_req_prep_async() or the
handlers themselves), and 2) when an op always needs additional space
to operate, as timeouts do.

Opcode preps already don't bother with the second case and do the
allocation unconditionally, so restrict needs_async_data to the first
case only and rename it to needs_async_setup.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: update for IOPOLL fix]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6cb78689
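
For orientation before the diff, here is a minimal userspace sketch of the two cases described in the message. It is illustrative only, not kernel code, and every name in it (op_def, request, defer_prep, timeout_prep, ...) is invented for the example: ops flagged needs_async_setup only get async_data allocated and prepped when the request is actually deferred or punted (case 1), while a timeout-style op allocates its async data unconditionally in its own prep handler and therefore no longer needs the flag (case 2).

/* Illustrative userspace model of the flag split -- NOT kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct op_def {
	unsigned needs_async_setup : 1;	/* case 1: prep async only if punted */
	unsigned short async_size;	/* size of async data, if any */
};

struct request {
	int opcode;
	void *async_data;
};

enum { OP_READV, OP_TIMEOUT };

static const struct op_def op_defs[] = {
	[OP_READV]   = { .needs_async_setup = 1, .async_size = 64 },
	[OP_TIMEOUT] = { .async_size = 32 },	/* no flag: its own prep allocates */
};

static int alloc_async_data(struct request *req)
{
	req->async_data = calloc(1, op_defs[req->opcode].async_size);
	return req->async_data ? 0 : -1;
}

/* Case 2: a timeout-style prep always sets up its async data itself. */
static int timeout_prep(struct request *req)
{
	if (!req->async_data && alloc_async_data(req))
		return -1;
	/* ... fill in timer state in req->async_data ... */
	return 0;
}

/* Case 1: only needs_async_setup ops get allocation + async prep, and
 * only when the request is about to be deferred or punted. */
static int defer_prep(struct request *req)
{
	if (!op_defs[req->opcode].needs_async_setup)
		return 0;
	if (req->async_data)	/* must not be set yet for these ops */
		return -1;
	if (alloc_async_data(req))
		return -1;
	/* ... opcode-specific async prep, e.g. copying the iovec ... */
	return 0;
}

int main(void)
{
	struct request rd = { .opcode = OP_READV };
	struct request to = { .opcode = OP_TIMEOUT };

	printf("readv defer_prep: %d (async_data %p)\n",
	       defer_prep(&rd), rd.async_data);
	printf("timeout prep:     %d (async_data %p)\n",
	       timeout_prep(&to), to.async_data);
	return 0;
}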
@@ -849,8 +849,8 @@ struct io_op_def {
 	unsigned pollout : 1;
 	/* op supports buffer selection */
 	unsigned buffer_select : 1;
-	/* must always have async data allocated */
-	unsigned needs_async_data : 1;
+	/* do prep async if is going to be punted */
+	unsigned needs_async_setup : 1;
 	/* should block plug */
 	unsigned plug : 1;
 	/* size of async data needed, if any */
@@ -864,7 +864,7 @@ static const struct io_op_def io_op_defs[] = {
 		.unbound_nonreg_file = 1,
 		.pollin = 1,
 		.buffer_select = 1,
-		.needs_async_data = 1,
+		.needs_async_setup = 1,
 		.plug = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
@@ -873,7 +873,7 @@ static const struct io_op_def io_op_defs[] = {
 		.hash_reg_file = 1,
 		.unbound_nonreg_file = 1,
 		.pollout = 1,
-		.needs_async_data = 1,
+		.needs_async_setup = 1,
 		.plug = 1,
 		.async_size = sizeof(struct io_async_rw),
 	},
@@ -907,7 +907,7 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_file = 1,
 		.unbound_nonreg_file = 1,
 		.pollout = 1,
-		.needs_async_data = 1,
+		.needs_async_setup = 1,
 		.async_size = sizeof(struct io_async_msghdr),
 	},
 	[IORING_OP_RECVMSG] = {
@@ -915,11 +915,10 @@ static const struct io_op_def io_op_defs[] = {
 		.unbound_nonreg_file = 1,
 		.pollin = 1,
 		.buffer_select = 1,
-		.needs_async_data = 1,
+		.needs_async_setup = 1,
 		.async_size = sizeof(struct io_async_msghdr),
 	},
 	[IORING_OP_TIMEOUT] = {
-		.needs_async_data = 1,
 		.async_size = sizeof(struct io_timeout_data),
 	},
 	[IORING_OP_TIMEOUT_REMOVE] = {
@@ -932,14 +931,13 @@ static const struct io_op_def io_op_defs[] = {
 	},
 	[IORING_OP_ASYNC_CANCEL] = {},
 	[IORING_OP_LINK_TIMEOUT] = {
-		.needs_async_data = 1,
 		.async_size = sizeof(struct io_timeout_data),
 	},
 	[IORING_OP_CONNECT] = {
 		.needs_file = 1,
 		.unbound_nonreg_file = 1,
 		.pollout = 1,
-		.needs_async_data = 1,
+		.needs_async_setup = 1,
 		.async_size = sizeof(struct io_async_connect),
 	},
 	[IORING_OP_FALLOCATE] = {
@@ -3139,7 +3137,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 			     const struct iovec *fast_iov,
 			     struct iov_iter *iter, bool force)
 {
-	if (!force && !io_op_defs[req->opcode].needs_async_data)
+	if (!force && !io_op_defs[req->opcode].needs_async_setup)
 		return 0;
 	if (!req->async_data) {
 		if (io_alloc_async_data(req)) {
@@ -5872,12 +5870,8 @@ static int io_req_prep_async(struct io_kiocb *req)
 {
 	switch (req->opcode) {
 	case IORING_OP_READV:
-	case IORING_OP_READ_FIXED:
-	case IORING_OP_READ:
 		return io_rw_prep_async(req, READ);
 	case IORING_OP_WRITEV:
-	case IORING_OP_WRITE_FIXED:
-	case IORING_OP_WRITE:
 		return io_rw_prep_async(req, WRITE);
 	case IORING_OP_SENDMSG:
 		return io_sendmsg_prep_async(req);
@@ -5891,11 +5885,10 @@ static int io_req_prep_async(struct io_kiocb *req)
 
 static int io_req_defer_prep(struct io_kiocb *req)
 {
-	if (!io_op_defs[req->opcode].needs_async_data)
-		return 0;
-	/* some opcodes init it during the inital prep */
-	if (req->async_data)
+	if (!io_op_defs[req->opcode].needs_async_setup)
 		return 0;
+	if (WARN_ON_ONCE(req->async_data))
+		return -EFAULT;
 	if (io_alloc_async_data(req))
 		return -EAGAIN;
 	return io_req_prep_async(req);
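
A note on the last hunk's behavior change: the old io_req_defer_prep() silently returned 0 when async_data was already allocated, since, as the removed comment says, some opcodes init it during the initial prep; with the flag narrowed to needs_async_setup, a request reaching this point with async_data already set indicates a bug, so it now warns and returns -EFAULT. Below is a small standalone model of that difference (illustrative only, not kernel code; old_defer_prep and new_defer_prep are invented names):

/* Model of the old vs. new io_req_defer_prep() checks -- NOT kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct request {
	int needs_async;	/* stands in for needs_async_data (old) / needs_async_setup (new) */
	void *async_data;
};

static int old_defer_prep(struct request *req)
{
	if (!req->needs_async)
		return 0;
	if (req->async_data)	/* old: some opcodes already set it up, accept quietly */
		return 0;
	req->async_data = malloc(64);
	return req->async_data ? 0 : -11;	/* stand-in for -EAGAIN */
}

static int new_defer_prep(struct request *req)
{
	if (!req->needs_async)
		return 0;
	if (req->async_data) {	/* new: should never happen, treat as a bug */
		fprintf(stderr, "warn: async_data already allocated\n");
		return -14;	/* stand-in for -EFAULT */
	}
	req->async_data = malloc(64);
	return req->async_data ? 0 : -11;
}

int main(void)
{
	struct request a = { .needs_async = 1, .async_data = (void *)&a };
	struct request b = a;

	printf("old: %d\n", old_defer_prep(&a));	/* 0: silently skipped */
	printf("new: %d\n", new_defer_prep(&b));	/* -14: flagged as a bug */
	return 0;
}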