Commit 3b44b371 authored by Hao Xu, committed by Jens Axboe

io_uring: split logic of force_nonblock

Currently force_nonblock stands for two meanings:
 - nowait or not
 - in an io-worker or not (i.e. whether uring_lock is already held)

Let's split the logic into two flags, IO_URING_F_NONBLOCK and
IO_URING_F_UNLOCKED, for the convenience of the next patch.
Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/20211018133431.103298-1-haoxu@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 00169246
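
The intent of the split, shown as a small standalone sketch (illustration only, not kernel code: the DEMO_* names and the pthread mutex stand in for the IO_URING_F_* flags and the io_ring_submit_lock()/unlock() helpers touched in the diff below, and the flag combinations in main() merely model the inline-submission and io-wq contexts described above):

/* Standalone illustration of the force_nonblock split.  The old single
 * bool implied both "don't block" and "caller already holds uring_lock";
 * after this patch those are independent bits in issue_flags. */
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_cmd_flags {
        DEMO_F_COMPLETE_DEFER = 1,
        DEMO_F_UNLOCKED       = 2,        /* caller does NOT hold the lock */
        DEMO_F_NONBLOCK       = INT_MIN,  /* must not wait for I/O */
};

static pthread_mutex_t demo_uring_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors how the converted call sites use io_ring_submit_lock()/unlock():
 * the lock is taken only when the caller signals it is not already held. */
static void demo_submit_lock(bool needs_lock)
{
        if (needs_lock)
                pthread_mutex_lock(&demo_uring_lock);
}

static void demo_submit_unlock(bool needs_lock)
{
        if (needs_lock)
                pthread_mutex_unlock(&demo_uring_lock);
}

static void demo_handler(unsigned int issue_flags)
{
        bool nonblock   = issue_flags & DEMO_F_NONBLOCK;
        bool needs_lock = issue_flags & DEMO_F_UNLOCKED;

        demo_submit_lock(needs_lock);
        printf("nonblock=%d needs_lock=%d\n", nonblock, needs_lock);
        demo_submit_unlock(needs_lock);
}

int main(void)
{
        /* inline submission context: nowait, lock already held by caller */
        demo_handler(DEMO_F_NONBLOCK | DEMO_F_COMPLETE_DEFER);
        /* io-wq worker context: may block, must take the lock itself */
        demo_handler(DEMO_F_UNLOCKED);
        return 0;
}

The point is that a handler can now answer "may I block?" and "do I need to take uring_lock?" independently, instead of inferring both from one force_nonblock bool.
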
@@ -199,6 +199,7 @@ struct io_rings {

 enum io_uring_cmd_flags {
         IO_URING_F_COMPLETE_DEFER = 1,
+        IO_URING_F_UNLOCKED = 2,
         /* int's last bit, sign checks are usually faster than a bit test */
         IO_URING_F_NONBLOCK = INT_MIN,
 };
@@ -2706,10 +2707,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
 {
         struct io_ring_ctx *ctx = req->ctx;
-        const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK);
+        const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

         /* workqueue context doesn't hold uring_lock, grab it now */
-        if (unlikely(need_lock))
+        if (unlikely(needs_lock))
                 mutex_lock(&ctx->uring_lock);

         /*
@@ -2737,7 +2738,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
         else
                 wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);

-        if (unlikely(need_lock)) {
+        if (unlikely(needs_lock)) {
                 /*
                  * If IORING_SETUP_SQPOLL is enabled, sqes are either handle
                  * in sq thread task context or in io worker task context. If
@@ -2921,7 +2922,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                         struct io_ring_ctx *ctx = req->ctx;

                         req_set_fail(req);
-                        if (!(issue_flags & IO_URING_F_NONBLOCK)) {
+                        if (issue_flags & IO_URING_F_UNLOCKED) {
                                 mutex_lock(&ctx->uring_lock);
                                 __io_req_complete(req, issue_flags, ret, cflags);
                                 mutex_unlock(&ctx->uring_lock);
@@ -3031,7 +3032,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 {
         struct io_buffer *kbuf = req->kbuf;
         struct io_buffer *head;
-        bool needs_lock = !(issue_flags & IO_URING_F_NONBLOCK);
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

         if (req->flags & REQ_F_BUFFER_SELECTED)
                 return kbuf;
@@ -3336,7 +3337,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
         int ret;

         /* submission path, ->uring_lock should already be taken */
-        ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK);
+        ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
         if (unlikely(ret < 0))
                 return ret;

@@ -4309,9 +4310,9 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
         struct io_ring_ctx *ctx = req->ctx;
         struct io_buffer *head;
         int ret = 0;
-        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

-        io_ring_submit_lock(ctx, !force_nonblock);
+        io_ring_submit_lock(ctx, needs_lock);

         lockdep_assert_held(&ctx->uring_lock);

@@ -4324,7 +4325,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)

         /* complete before unlock, IOPOLL may need the lock */
         __io_req_complete(req, issue_flags, ret, 0);
-        io_ring_submit_unlock(ctx, !force_nonblock);
+        io_ring_submit_unlock(ctx, needs_lock);
         return 0;
 }

@@ -4396,9 +4397,9 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
         struct io_ring_ctx *ctx = req->ctx;
         struct io_buffer *head, *list;
         int ret = 0;
-        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

-        io_ring_submit_lock(ctx, !force_nonblock);
+        io_ring_submit_lock(ctx, needs_lock);

         lockdep_assert_held(&ctx->uring_lock);

@@ -4414,7 +4415,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
                 req_set_fail(req);
         /* complete before unlock, IOPOLL may need the lock */
         __io_req_complete(req, issue_flags, ret, 0);
-        io_ring_submit_unlock(ctx, !force_nonblock);
+        io_ring_submit_unlock(ctx, needs_lock);
         return 0;
 }

@@ -6271,6 +6272,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
         struct io_ring_ctx *ctx = req->ctx;
         u64 sqe_addr = req->cancel.addr;
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
         struct io_tctx_node *node;
         int ret;

@@ -6279,7 +6281,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
                 goto done;

         /* slow path, try all io-wq's */
-        io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+        io_ring_submit_lock(ctx, needs_lock);
         ret = -ENOENT;
         list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
                 struct io_uring_task *tctx = node->task->io_uring;
@@ -6288,7 +6290,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
                 if (ret != -ENOENT)
                         break;
         }
-        io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+        io_ring_submit_unlock(ctx, needs_lock);
 done:
         if (ret < 0)
                 req_set_fail(req);
@@ -6315,6 +6317,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
         struct io_ring_ctx *ctx = req->ctx;
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
         struct io_uring_rsrc_update2 up;
         int ret;

@@ -6324,10 +6327,10 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
         up.tags = 0;
         up.resv = 0;

-        io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+        io_ring_submit_lock(ctx, needs_lock);
         ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
                                         &up, req->rsrc_update.nr_args);
-        io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+        io_ring_submit_unlock(ctx, needs_lock);

         if (ret < 0)
                 req_set_fail(req);
@@ -6737,7 +6740,7 @@ static void io_wq_submit_work(struct io_wq_work *work)

         if (!ret) {
                 do {
-                        ret = io_issue_sqe(req, 0);
+                        ret = io_issue_sqe(req, IO_URING_F_UNLOCKED);
                         /*
                          * We can get EAGAIN for polled IO even though we're
                          * forcing a sync submission from here, since we can't
@@ -8326,12 +8329,12 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                  unsigned int issue_flags, u32 slot_index)
 {
         struct io_ring_ctx *ctx = req->ctx;
-        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
         bool needs_switch = false;
         struct io_fixed_file *file_slot;
         int ret = -EBADF;

-        io_ring_submit_lock(ctx, !force_nonblock);
+        io_ring_submit_lock(ctx, needs_lock);
         if (file->f_op == &io_uring_fops)
                 goto err;
         ret = -ENXIO;
@@ -8372,7 +8375,7 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 err:
         if (needs_switch)
                 io_rsrc_node_switch(ctx, ctx->file_data);
-        io_ring_submit_unlock(ctx, !force_nonblock);
+        io_ring_submit_unlock(ctx, needs_lock);
         if (ret)
                 fput(file);
         return ret;
@@ -8382,11 +8385,12 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
         unsigned int offset = req->close.file_slot - 1;
         struct io_ring_ctx *ctx = req->ctx;
+        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
         struct io_fixed_file *file_slot;
         struct file *file;
         int ret, i;

-        io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+        io_ring_submit_lock(ctx, needs_lock);
         ret = -ENXIO;
         if (unlikely(!ctx->file_data))
                 goto out;
@@ -8412,7 +8416,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
         io_rsrc_node_switch(ctx, ctx->file_data);
         ret = 0;
 out:
-        io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+        io_ring_submit_unlock(ctx, needs_lock);
         return ret;
 }