Commit b0dd8a41 authored by Jens Axboe

io_uring: correct poll cancel and linked timeout expiration completion

Currently a poll request fills a completion entry of 0, even if it got
cancelled. This is odd, and it makes it harder to support with request
chains. Ensure that it returns -ECANCELED in the completion event if it
got cancelled, and furthermore ensure that the linked timeout that
triggered it completes with -ETIME if we did indeed trigger the
completion through a timeout.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e0e328c4
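The behaviour this commit describes can be exercised from userspace. Below is a minimal, illustrative sketch (not part of this commit) using liburing, assuming a liburing new enough to provide io_uring_prep_link_timeout: a POLL_ADD on an idle pipe is linked to a one-second timeout, and once the timeout fires the poll CQE is expected to carry -ECANCELED while the linked timeout's CQE carries -ETIME. The pipe setup and user_data values are arbitrary choices for the example.

/*
 * Hedged userspace sketch: poll an empty pipe with a linked timeout and
 * print the two completion results. Expected after this change:
 * poll (user_data=1) -> -ECANCELED, linked timeout (user_data=2) -> -ETIME.
 */
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct io_uring ring;
	int fds[2], i;

	if (pipe(fds) < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* poll for readability on an empty pipe; no data will arrive */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	/* linked timeout that cancels the poll after one second */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		/* expected: user_data=1 -> -ECANCELED, user_data=2 -> -ETIME */
		printf("user_data=%llu res=%d (%s)\n",
		       (unsigned long long) cqe->user_data, cqe->res,
		       cqe->res < 0 ? strerror(-cqe->res) : "ready");
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Before this patch, the same run would have posted a completion of 0 for the cancelled poll request, which is the behaviour the change removes.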
@@ -2066,11 +2066,14 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
 	req->poll.done = true;
-	io_cqring_fill_event(req, mangle_poll(mask));
+	if (error)
+		io_cqring_fill_event(req, error);
+	else
+		io_cqring_fill_event(req, mangle_poll(mask));
 	io_commit_cqring(ctx);
 }
@@ -2084,11 +2087,16 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt = NULL;
 	__poll_t mask = 0;
+	int ret = 0;
 
-	if (work->flags & IO_WQ_WORK_CANCEL)
+	if (work->flags & IO_WQ_WORK_CANCEL) {
 		WRITE_ONCE(poll->canceled, true);
+		ret = -ECANCELED;
+	} else if (READ_ONCE(poll->canceled)) {
+		ret = -ECANCELED;
+	}
 
-	if (!READ_ONCE(poll->canceled))
+	if (ret != -ECANCELED)
 		mask = vfs_poll(poll->file, &pt) & poll->events;
 
 	/*
@@ -2099,13 +2107,13 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 	 * avoid further branches in the fast path.
 	 */
 	spin_lock_irq(&ctx->completion_lock);
-	if (!mask && !READ_ONCE(poll->canceled)) {
+	if (!mask && ret != -ECANCELED) {
 		add_wait_queue(poll->head, &poll->wait);
 		spin_unlock_irq(&ctx->completion_lock);
 		return;
 	}
 	io_poll_remove_req(req);
-	io_poll_complete(req, mask);
+	io_poll_complete(req, mask, ret);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_ev_posted(ctx);
@@ -2139,7 +2147,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	 */
 	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
 		io_poll_remove_req(req);
-		io_poll_complete(req, mask);
+		io_poll_complete(req, mask, 0);
 		req->flags |= REQ_F_COMP_LOCKED;
 		io_put_req(req);
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2251,7 +2259,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 	if (mask) { /* no async, we'd stolen it */
 		ipt.error = 0;
-		io_poll_complete(req, mask);
+		io_poll_complete(req, mask, 0);
 	}
 	spin_unlock_irq(&ctx->completion_lock);
@@ -2503,7 +2511,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
 
 static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 				     struct io_kiocb *req, __u64 sqe_addr,
-				     struct io_kiocb **nxt)
+				     struct io_kiocb **nxt, int success_ret)
 {
 	unsigned long flags;
 	int ret;
@@ -2520,6 +2528,8 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 		goto done;
 	ret = io_poll_cancel(ctx, sqe_addr);
 done:
+	if (!ret)
+		ret = success_ret;
 	io_cqring_fill_event(req, ret);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2541,7 +2551,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	    sqe->cancel_flags)
 		return -EINVAL;
 
-	io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt);
+	io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
 	return 0;
 }
@@ -2831,7 +2841,8 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {
-		io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
+		io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
+						-ETIME);
 		io_put_req(prev);
 	} else {
 		io_cqring_add_event(req, -ETIME);