Commit b7638ad0 authored by Christian Brauner's avatar Christian Brauner

eventfd: make eventfd_signal{_mask}() void

No caller cares about the return value.

Link: https://lore.kernel.org/r/20231122-vfs-eventfd-signal-v2-4-bd549b14ce0c@kernel.org
Reviewed-by: default avatarJan Kara <jack@suse.cz>
Reviewed-by: default avatarJens Axboe <axboe@kernel.dk>
Signed-off-by: default avatarChristian Brauner <brauner@kernel.org>
parent 120ae585
...@@ -43,10 +43,19 @@ struct eventfd_ctx { ...@@ -43,10 +43,19 @@ struct eventfd_ctx {
int id; int id;
}; };
/**
 * eventfd_signal_mask - Increment the event counter
 * @ctx: [in] Pointer to the eventfd context.
 * @mask: [in] poll mask
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as overflow condition by returning an EPOLLERR
 * to poll(2).
 */
void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller users potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_allowed() before calling this function. If
	 * it returns false, the eventfd_signal() call should be deferred to a
	 * safe context.
	 *
	 * NOTE(review): the middle of this comment block fell between diff
	 * hunks in the scraped page and was reconstructed from upstream —
	 * verify against fs/eventfd.c.
	 */
	if (WARN_ON_ONCE(current->in_eventfd))
		return;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	current->in_eventfd = 1;
	/* Saturate at ULLONG_MAX instead of wrapping; readers see overflow. */
	if (ctx->count < ULLONG_MAX)
		ctx->count++;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	current->in_eventfd = 0;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
EXPORT_SYMBOL_GPL(eventfd_signal_mask);
static void eventfd_free_ctx(struct eventfd_ctx *ctx) static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{ {
......
...@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx); ...@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd); struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx); void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt); __u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
...@@ -58,14 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) ...@@ -58,14 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS); return ERR_PTR(-ENOSYS);
} }
/*
 * Stub for !CONFIG_EVENTFD builds: signalling a nonexistent eventfd is a
 * no-op. (Post-commit the function returns void, so the old -ENOSYS
 * return is gone along with the separate eventfd_signal() stub.)
 */
static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
...@@ -91,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) ...@@ -91,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif #endif
/*
 * eventfd_signal - Increment the event counter with no extra poll flags.
 * @ctx: [in] Pointer to the eventfd context.
 *
 * Convenience wrapper around eventfd_signal_mask() with an empty mask;
 * defined outside the CONFIG_EVENTFD ifdef so it works for both builds.
 */
static inline void eventfd_signal(struct eventfd_ctx *ctx)
{
	eventfd_signal_mask(ctx, 0);
}
#endif /* _LINUX_EVENTFD_H */ #endif /* _LINUX_EVENTFD_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment