Commit 66fc25ca authored by Pavel Begunkov, committed by Jens Axboe

io_uring: shuffle io_eventfd_signal() bits around

A preparation patch, which moves the fast ->io_ev_fd check out of
io_eventfd_signal() and into the ev_posted*() helpers. Compilers are smart
enough that this doesn't change the generated code, but we will need it later.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ec4091ac76d43912b73917e8db651c2dac4b7b01.1647481208.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0f847471
@@ -1828,10 +1828,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
 	struct io_ev_fd *ev_fd;
 
-	/* Return quickly if ctx->io_ev_fd doesn't exist */
-	if (likely(!rcu_dereference_raw(ctx->io_ev_fd)))
-		return;
-
 	rcu_read_lock();
 	/*
 	 * rcu_dereference ctx->io_ev_fd once and use it for both for checking
@@ -1851,7 +1847,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 
 	if (!ev_fd->eventfd_async || io_wq_current_is_worker())
 		eventfd_signal(ev_fd->cq_ev_fd, 1);
-
 out:
 	rcu_read_unlock();
 }
@@ -1863,7 +1858,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
  * 1:1 relationship between how many times this function is called (and
  * hence the eventfd count) and number of CQEs posted to the CQ ring.
  */
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
@@ -1872,6 +1867,7 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
@@ -1881,6 +1877,7 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		if (wq_has_sleeper(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
 	}
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
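For readers outside the kernel tree, the following is a minimal userspace sketch of the pattern this patch sets up: the per-completion hot path does one cheap, lockless pointer check and only enters the heavier signaling path when an eventfd is actually registered. C11 atomics stand in for the kernel's RCU primitives here, and every name below (ring_ctx, eventfd_signal_path, cq_event_posted) is illustrative, not io_uring API.

/*
 * Minimal sketch (not kernel code) of hoisting a cheap NULL check into
 * the caller. Atomics approximate RCU; names are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ev_fd {
	int cq_ev_fd;
};

struct ring_ctx {
	_Atomic(struct ev_fd *) io_ev_fd;	/* NULL unless registered */
};

/* Slow path: entered only when the hot-path check saw a registered fd. */
static void eventfd_signal_path(struct ring_ctx *ctx)
{
	struct ev_fd *ev = atomic_load_explicit(&ctx->io_ev_fd,
						memory_order_acquire);

	/* Re-check: the fd may have been unregistered concurrently. */
	if (!ev)
		return;
	printf("signal eventfd %d\n", ev->cq_ev_fd);
}

/* Hot path, akin to ev_posted*(): one cheap load gates the call. */
static void cq_event_posted(struct ring_ctx *ctx)
{
	/* ... wake sleepers on the CQ waitqueue here ... */
	if (atomic_load_explicit(&ctx->io_ev_fd, memory_order_relaxed))
		eventfd_signal_path(ctx);
}

int main(void)
{
	struct ev_fd ev = { .cq_ev_fd = 42 };
	struct ring_ctx ctx = { 0 };

	cq_event_posted(&ctx);	/* no eventfd registered: call skipped */
	atomic_store_explicit(&ctx.io_ev_fd, &ev, memory_order_release);
	cq_event_posted(&ctx);	/* now the slow path fires */
	return 0;
}

The split mirrors the point of the patch: ev_posted*() runs on every batch of posted CQEs, so the common no-eventfd case now costs a single well-predicted load and branch instead of a function call that takes rcu_read_lock() only to return immediately.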