Commit 5d7943d9 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: propagate locking state to poll cancel

Poll cancellation will soon need to grab ->uring_lock internally, so pass
the locking state, i.e. issue_flags, into the cancellation functions.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b86781d047727c07163443b57551a3fa57c7c5e1.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e6f89be6
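
The patch itself is mechanical: it only threads the new issue_flags argument through, so that the announced follow-up can take ->uring_lock inside poll cancellation based on it. As a rough sketch of that pattern (illustrative only, not part of this commit; example_poll_cancel_locked() is a made-up name, and the body assumes it sits next to __io_poll_cancel() with the io_ring_submit_lock()/io_ring_submit_unlock() helpers in scope):

/*
 * Sketch only, not from this commit: a cancellation helper using the
 * propagated issue_flags to decide whether it must take ->uring_lock
 * itself. io_ring_submit_lock()/io_ring_submit_unlock() touch the mutex
 * only when IO_URING_F_UNLOCKED is set, i.e. when the caller does not
 * already hold ->uring_lock.
 */
static int example_poll_cancel_locked(struct io_ring_ctx *ctx,
				      struct io_cancel_data *cd,
				      unsigned issue_flags)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);	/* locks only if caller did not */
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

A caller that already holds ->uring_lock, e.g. task work running with *locked == true, which the timeout hunk below maps to issue_flags without IO_URING_F_UNLOCKED, pays no extra locking cost.
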
@@ -78,7 +78,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx,
 	return ret;
 }
-int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
+int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd,
+		  unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
@@ -93,7 +94,7 @@ int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 	if (!ret)
 		return 0;
-	ret = io_poll_cancel(ctx, cd);
+	ret = io_poll_cancel(ctx, cd, issue_flags);
 	if (ret != -ENOENT)
 		return ret;
@@ -136,7 +137,7 @@ static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
 	int ret, nr = 0;
 	do {
-		ret = io_try_cancel(req, cd);
+		ret = io_try_cancel(req, cd, issue_flags);
 		if (ret == -ENOENT)
			break;
 		if (!all)

@@ -3,5 +3,6 @@
 int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
-int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
+int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd,
+		  unsigned int issue_flags);
 void init_hash_table(struct io_hash_table *table, unsigned size);

@@ -649,7 +649,8 @@ static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 	return req ? 0 : -ENOENT;
 }
-int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
+int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+		   unsigned issue_flags)
 {
 	return __io_poll_cancel(ctx, cd, &ctx->cancel_table);
 }

@@ -24,7 +24,8 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
-int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
+int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+		   unsigned issue_flags);
 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
 bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			bool cancel_all);

@@ -262,6 +262,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
+	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
 	struct io_timeout *timeout = io_kiocb_to_cmd(req);
 	struct io_kiocb *prev = timeout->prev;
 	int ret = -ENOENT;
@@ -273,7 +274,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
			.data = prev->cqe.user_data,
		};
-		ret = io_try_cancel(req, &cd);
+		ret = io_try_cancel(req, &cd, issue_flags);
	}
	io_req_set_res(req, ret ?: -ETIME, 0);
	io_req_complete_post(req);