Commit 521223d7 authored by Jens Axboe

io_uring/cancel: don't default to setting req->work.cancel_seq

Just leave it unset by default, avoiding dipping into the last
cacheline (which is otherwise untouched) for the fast path of using
poll to drive networked traffic. Add a flag that tells us if the
sequence is valid or not, and then we can defer actually assigning
the flag and sequence until someone runs cancelations.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4bcb982c
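
The pattern in this change is generic: instead of stamping every request with the current cancel sequence up front, the sequence is only recorded (and flagged as valid) the first time a canceler inspects the request, keeping the cold field untouched on the fast path. A minimal userspace sketch of that idea follows; the names (struct fake_req, FAKE_F_CANCEL_SEQ, fake_match_sequence) are illustrative stand-ins for this page, not the kernel's types.

#include <stdbool.h>
#include <stdint.h>

#define FAKE_F_CANCEL_SEQ	(1u << 0)	/* cancel_seq field holds a valid value */

struct fake_req {
	uint32_t flags;		/* hot field, written for every request anyway */
	int cancel_seq;		/* cold field, only written once cancelation runs */
};

/*
 * Returns true if this request was already visited with the given
 * sequence; otherwise records the sequence, marks it valid, and
 * returns false so the first pass still matches the request.
 */
static bool fake_match_sequence(struct fake_req *req, int sequence)
{
	if ((req->flags & FAKE_F_CANCEL_SEQ) && sequence == req->cancel_seq)
		return true;
	req->flags |= FAKE_F_CANCEL_SEQ;
	req->cancel_seq = sequence;
	return false;
}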
@@ -463,6 +463,7 @@ enum {
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
 	REQ_F_POLL_NO_LAZY_BIT,
+	REQ_F_CANCEL_SEQ_BIT,

 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -535,6 +536,8 @@ enum {
 	REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
 	/* don't use lazy poll wake for this request */
 	REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+	/* cancel sequence is set and valid */
+	REQ_F_CANCEL_SEQ = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
 };

 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
...
@@ -58,9 +58,8 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
 		return false;
 	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 check_seq:
-		if (cd->seq == req->work.cancel_seq)
+		if (io_cancel_match_sequence(req, cd->seq))
 			return false;
-		req->work.cancel_seq = cd->seq;
 	}

 	return true;
...
@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);

+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+		return true;
+	req->flags |= REQ_F_CANCEL_SEQ;
+	req->work.cancel_seq = sequence;
+	return false;
+}
+
 #endif
@@ -463,7 +463,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 	req->work.list.next = NULL;
 	req->work.flags = 0;
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
...
@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;
...
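
Continuing the illustrative sketch above (again, not kernel code), a cancel-all style pass over the same hypothetical request shows the semantics the new helper provides: the first sighting within a pass matches, a repeat sighting with the same sequence is skipped, and a later pass with a fresh sequence matches again.

int main(void)
{
	struct fake_req req = { .flags = 0, .cancel_seq = 0 };
	bool skip;

	skip = fake_match_sequence(&req, 1);	/* false: first sighting, gets canceled */
	skip = fake_match_sequence(&req, 1);	/* true: same pass, already handled */
	skip = fake_match_sequence(&req, 2);	/* false: new pass, new sequence */
	(void)skip;
	return 0;
}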