Commit 3a08576b authored by Pavel Begunkov, committed by Jens Axboe

io_uring: remove check_cq checking from hot paths

All ctx->check_cq events are slow path, so don't test every single flag one
by one in the hot path; instead, read check_cq once and put the per-flag
checks behind a common guarding if.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dff026585cea7ff3a172a7c83894a3b0111bbf6a.1655310733.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent aeaa72c6
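For illustration only, here is a minimal, self-contained userspace sketch of the same pattern the patch applies: all rarely-set flags are read once and their individual bits are tested only inside a single unlikely() branch, so the common case pays for one test of the whole word. The names used here (SLOW_FLAG_A_BIT, handle_slow_flags, hot_path) are hypothetical stand-ins, not the io_uring identifiers.

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical slow-path condition bits, standing in for IO_CHECK_CQ_*_BIT. */
enum {
	SLOW_FLAG_A_BIT,
	SLOW_FLAG_B_BIT,
};

/* All rarely-set flags are handled out of line, behind one branch. */
static int handle_slow_flags(unsigned long flags)
{
	if (flags & BIT(SLOW_FLAG_A_BIT))
		printf("flag A: flush-style handling\n");
	if (flags & BIT(SLOW_FLAG_B_BIT))
		return -1;		/* analogous to returning -EBADR */
	return 0;
}

static int hot_path(unsigned long flags)
{
	/*
	 * The hot path pays for a single test of the whole word
	 * (READ_ONCE() in the kernel); individual bits are only
	 * examined when at least one of them is set.
	 */
	if (unlikely(flags)) {
		int ret = handle_slow_flags(flags);

		if (ret)
			return ret;
	}

	/* ... fast-path work would continue here ... */
	return 0;
}

int main(void)
{
	printf("fast case: %d\n", hot_path(0));
	printf("slow case: %d\n",
	       hot_path(BIT(SLOW_FLAG_A_BIT) | BIT(SLOW_FLAG_B_BIT)));
	return 0;
}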
@@ -1259,24 +1259,25 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	int ret = 0;
 	unsigned long check_cq;
 
+	check_cq = READ_ONCE(ctx->check_cq);
+	if (unlikely(check_cq)) {
+		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+			__io_cqring_overflow_flush(ctx, false);
+		/*
+		 * Similarly do not spin if we have not informed the user of any
+		 * dropped CQE.
+		 */
+		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+			return -EBADR;
+	}
 	/*
 	 * Don't enter poll loop if we already have events pending.
 	 * If we do, we can potentially be spinning for commands that
 	 * already triggered a CQE (eg in error).
 	 */
-	check_cq = READ_ONCE(ctx->check_cq);
-	if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-		__io_cqring_overflow_flush(ctx, false);
 	if (io_cqring_events(ctx))
 		return 0;
-
-	/*
-	 * Similarly do not spin if we have not informed the user of any
-	 * dropped CQE.
-	 */
-	if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
-		return -EBADR;
 
 	do {
 		/*
 		 * If a submit got punted to a workqueue, we can have the
@@ -2203,12 +2204,15 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	ret = io_run_task_work_sig();
 	if (ret || io_should_wake(iowq))
 		return ret;
+
 	check_cq = READ_ONCE(ctx->check_cq);
-	/* let the caller flush overflows, retry */
-	if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-		return 1;
-	if (unlikely(check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)))
-		return -EBADR;
+	if (unlikely(check_cq)) {
+		/* let the caller flush overflows, retry */
+		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+			return 1;
+		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
+			return -EBADR;
+	}
 	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
 		return -ETIME;
 	return 1;