Commit 6e6b8c62, authored by Pavel Begunkov, committed by Jens Axboe

io_uring/rw: avoid punting to io-wq directly

kiocb_done() shouldn't care about specifically redirecting requests to
io-wq. Remove the hop to task work that then queues io-wq; instead,
return -EAGAIN and let the io_uring core code handle the offloading.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/413564e550fe23744a970e1783dfa566291b0e6f.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1afdb760
...@@ -492,7 +492,7 @@ static void io_prep_async_link(struct io_kiocb *req) ...@@ -492,7 +492,7 @@ static void io_prep_async_link(struct io_kiocb *req)
} }
} }
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use) static void io_queue_iowq(struct io_kiocb *req)
{ {
struct io_kiocb *link = io_prep_linked_timeout(req); struct io_kiocb *link = io_prep_linked_timeout(req);
struct io_uring_task *tctx = req->task->io_uring; struct io_uring_task *tctx = req->task->io_uring;
...@@ -1499,7 +1499,7 @@ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts) ...@@ -1499,7 +1499,7 @@ void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
if (unlikely(req->task->flags & PF_EXITING)) if (unlikely(req->task->flags & PF_EXITING))
io_req_defer_failed(req, -EFAULT); io_req_defer_failed(req, -EFAULT);
else if (req->flags & REQ_F_FORCE_ASYNC) else if (req->flags & REQ_F_FORCE_ASYNC)
io_queue_iowq(req, ts); io_queue_iowq(req);
else else
io_queue_sqe(req); io_queue_sqe(req);
} }
...@@ -2087,7 +2087,7 @@ static void io_queue_async(struct io_kiocb *req, int ret) ...@@ -2087,7 +2087,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
break; break;
case IO_APOLL_ABORTED: case IO_APOLL_ABORTED:
io_kbuf_recycle(req, 0); io_kbuf_recycle(req, 0);
io_queue_iowq(req, NULL); io_queue_iowq(req);
break; break;
case IO_APOLL_OK: case IO_APOLL_OK:
break; break;
...@@ -2134,7 +2134,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req) ...@@ -2134,7 +2134,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
if (unlikely(req->ctx->drain_active)) if (unlikely(req->ctx->drain_active))
io_drain_req(req); io_drain_req(req);
else else
io_queue_iowq(req, NULL); io_queue_iowq(req);
} }
} }
......
...@@ -79,7 +79,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd, ...@@ -79,7 +79,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags); void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req); bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req); void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts); void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret); void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts); void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
......
...@@ -187,12 +187,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) ...@@ -187,12 +187,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
return NULL; return NULL;
} }
/*
 * Removed by this commit: schedule a reissued request for io-wq offload by
 * pointing its task-work callback at io_queue_iowq() and queueing task work.
 * NOTE(review): this assumed io_queue_iowq() matched the task-work callback
 * signature; the commit drops that by having kiocb_done() return -EAGAIN and
 * letting the io_uring core handle offloading instead — confirm against the
 * kiocb_done() hunk below.
 */
static void io_req_task_queue_reissue(struct io_kiocb *req)
{
req->io_task_work.func = io_queue_iowq;
io_req_task_work_add(req);
}
#ifdef CONFIG_BLOCK #ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req) static bool io_resubmit_prep(struct io_kiocb *req)
{ {
...@@ -405,7 +399,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, ...@@ -405,7 +399,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
if (req->flags & REQ_F_REISSUE) { if (req->flags & REQ_F_REISSUE) {
req->flags &= ~REQ_F_REISSUE; req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) if (io_resubmit_prep(req))
io_req_task_queue_reissue(req); return -EAGAIN;
else else
io_req_task_queue_fail(req, final_ret); io_req_task_queue_fail(req, final_ret);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment