Commit 120ae585 authored by Christian Brauner's avatar Christian Brauner

eventfd: simplify eventfd_signal_mask()

The eventfd_signal_mask() helper was introduced for io_uring and,
similar to eventfd_signal(), it always passed 1 for @n. So don't bother
with that argument at all.

Link: https://lore.kernel.org/r/20231122-vfs-eventfd-signal-v2-3-bd549b14ce0c@kernel.org
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 3652117f
...@@ -43,9 +43,10 @@ struct eventfd_ctx { ...@@ -43,9 +43,10 @@ struct eventfd_ctx {
int id; int id;
}; };
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask) __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{ {
unsigned long flags; unsigned long flags;
__u64 n = 1;
/* /*
* Deadlock or stack overflow issues can happen if we recurse here * Deadlock or stack overflow issues can happen if we recurse here
...@@ -68,7 +69,7 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask) ...@@ -68,7 +69,7 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
current->in_eventfd = 0; current->in_eventfd = 0;
spin_unlock_irqrestore(&ctx->wqh.lock, flags); spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return n; return n == 1;
} }
/** /**
...@@ -84,7 +85,7 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask) ...@@ -84,7 +85,7 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
*/ */
__u64 eventfd_signal(struct eventfd_ctx *ctx) __u64 eventfd_signal(struct eventfd_ctx *ctx)
{ {
return eventfd_signal_mask(ctx, 1, 0); return eventfd_signal_mask(ctx, 0);
} }
EXPORT_SYMBOL_GPL(eventfd_signal); EXPORT_SYMBOL_GPL(eventfd_signal);
......
...@@ -36,7 +36,7 @@ struct file *eventfd_fget(int fd); ...@@ -36,7 +36,7 @@ struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx); __u64 eventfd_signal(struct eventfd_ctx *ctx);
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask); __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt); __u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
...@@ -63,8 +63,7 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx) ...@@ -63,8 +63,7 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx)
return -ENOSYS; return -ENOSYS;
} }
static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
unsigned mask)
{ {
return -ENOSYS; return -ENOSYS;
} }
......
...@@ -558,7 +558,7 @@ static void io_eventfd_ops(struct rcu_head *rcu) ...@@ -558,7 +558,7 @@ static void io_eventfd_ops(struct rcu_head *rcu)
int ops = atomic_xchg(&ev_fd->ops, 0); int ops = atomic_xchg(&ev_fd->ops, 0);
if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT)) if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE); eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback /* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
* ordering in a race but if references are 0 we know we have to free * ordering in a race but if references are 0 we know we have to free
...@@ -594,7 +594,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx) ...@@ -594,7 +594,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
goto out; goto out;
if (likely(eventfd_signal_allowed())) { if (likely(eventfd_signal_allowed())) {
eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE); eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
} else { } else {
atomic_inc(&ev_fd->refs); atomic_inc(&ev_fd->refs);
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment