Commit ba3cdb6f authored by Pavel Begunkov, committed by Jens Axboe

io_uring: improve task exit timeout cancellations

Don't spin trying to cancel timeouts that are reachable but not
cancellable, e.g. already executing.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ab8a7440a60bbdf69ae514f672ad050e43dd1b03.1655684496.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent affa87db
...@@ -49,7 +49,7 @@ static inline void io_put_req(struct io_kiocb *req) ...@@ -49,7 +49,7 @@ static inline void io_put_req(struct io_kiocb *req)
} }
} }
static void io_kill_timeout(struct io_kiocb *req, int status) static bool io_kill_timeout(struct io_kiocb *req, int status)
__must_hold(&req->ctx->completion_lock) __must_hold(&req->ctx->completion_lock)
__must_hold(&req->ctx->timeout_lock) __must_hold(&req->ctx->timeout_lock)
{ {
...@@ -64,7 +64,9 @@ static void io_kill_timeout(struct io_kiocb *req, int status) ...@@ -64,7 +64,9 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
atomic_read(&req->ctx->cq_timeouts) + 1); atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&timeout->list); list_del_init(&timeout->list);
io_req_tw_post_queue(req, status, 0); io_req_tw_post_queue(req, status, 0);
return true;
} }
return false;
} }
__cold void io_flush_timeouts(struct io_ring_ctx *ctx) __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
...@@ -620,11 +622,10 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, ...@@ -620,11 +622,10 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) { list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout); struct io_kiocb *req = cmd_to_io_kiocb(timeout);
if (io_match_task(req, tsk, cancel_all)) { if (io_match_task(req, tsk, cancel_all) &&
io_kill_timeout(req, -ECANCELED); io_kill_timeout(req, -ECANCELED))
canceled++; canceled++;
} }
}
spin_unlock_irq(&ctx->timeout_lock); spin_unlock_irq(&ctx->timeout_lock);
io_commit_cqring(ctx); io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock); spin_unlock(&ctx->completion_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment