Commit 2593553a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: remove __io_req_task_cancel()

Both io_req_complete_failed() and __io_req_task_cancel() do the same
thing: set the failure flag, put both req refs and emit a CQE. The
former is a bit more advanced as it also puts the req back into the
req cache, so make it take over from __io_req_task_cancel() and remove
the latter.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dac7a098
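
For context, the surviving helper is roughly the following at this point in the tree. This is a hedged sketch of io_req_complete_failed() reconstructed from surrounding kernel sources of this era, not part of this diff: it marks the request as failed, drops the submit reference, and lets the completion path post the CQE and drop the last reference, which may recycle the request into the ctx request cache instead of freeing it.

/*
 * Sketch (assumption, not part of this diff): the helper that takes
 * over the cancellation path removed below.
 */
static void io_req_complete_failed(struct io_kiocb *req, long res)
{
	req_set_fail_links(req);		/* mark the req (and its link chain) as failed */
	io_put_req(req);			/* drop the submit reference */
	io_req_complete_post(req, res, 0);	/* post the CQE; dropping the final ref
						 * may return the req to the ctx cache */
}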
@@ -1024,7 +1024,6 @@ static bool io_rw_reissue(struct io_kiocb *req);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req, int nr);
-static void io_double_put_req(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
 static void io_put_task(struct task_struct *task, int nr);
 static void io_queue_next(struct io_kiocb *req);
@@ -2019,20 +2018,6 @@ static void io_req_task_work_add_fallback(struct io_kiocb *req,
 	io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
 }
 
-static void __io_req_task_cancel(struct io_kiocb *req, int error)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	spin_lock_irq(&ctx->completion_lock);
-	io_cqring_fill_event(req, error);
-	io_commit_cqring(ctx);
-	spin_unlock_irq(&ctx->completion_lock);
-
-	io_cqring_ev_posted(ctx);
-	req_set_fail_links(req);
-	io_double_put_req(req);
-}
-
 static void io_req_task_cancel(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -2040,7 +2025,7 @@ static void io_req_task_cancel(struct callback_head *cb)
 
 	/* ctx is guaranteed to stay alive while we hold uring_lock */
 	mutex_lock(&ctx->uring_lock);
-	__io_req_task_cancel(req, req->result);
+	io_req_complete_failed(req, req->result);
 	mutex_unlock(&ctx->uring_lock);
 }
@@ -2053,7 +2038,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	if (!(current->flags & PF_EXITING) && !current->in_execve)
 		__io_queue_sqe(req);
 	else
-		__io_req_task_cancel(req, -EFAULT);
+		io_req_complete_failed(req, -EFAULT);
 	mutex_unlock(&ctx->uring_lock);
 }
@@ -2208,13 +2193,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
 		io_free_req_deferred(req);
 }
 
-static void io_double_put_req(struct io_kiocb *req)
-{
-	/* drop both submit and complete references */
-	if (req_ref_sub_and_test(req, 2))
-		io_free_req(req);
-}
-
 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
 	/* See comment at the top of this file */
@@ -5106,7 +5084,7 @@ static void io_async_task_func(struct callback_head *cb)
 	if (!READ_ONCE(apoll->poll.canceled))
 		__io_req_task_submit(req);
 	else
-		__io_req_task_cancel(req, -ECANCELED);
+		io_req_complete_failed(req, -ECANCELED);
 
 	kfree(apoll->double_poll);
 	kfree(apoll);