Commit 971316f0 authored by Oleg Nesterov, committed by Linus Torvalds

epoll: ep_unregister_pollwait() can use the freed pwq->whead

signalfd_cleanup() ensures that ->signalfd_wqh is not used, but
this is not enough. eppoll_entry->whead still points to the memory
we are going to free, ep_unregister_pollwait()->remove_wait_queue()
is obviously unsafe.

Change ep_poll_callback(POLLFREE) to set eppoll_entry->whead = NULL,
change ep_unregister_pollwait() to check pwq->whead != NULL under
rcu_read_lock() before remove_wait_queue(). We add the new helper,
ep_remove_wait_queue(), for this.

This works because sighand_cachep is SLAB_DESTROY_BY_RCU and because
->signalfd_wqh is initialized in sighand_ctor(), not in copy_sighand.
ep_unregister_pollwait()->remove_wait_queue() can play with already
freed and potentially reused ->sighand, but this is fine. This memory
must have the valid ->signalfd_wqh until rcu_read_unlock().
Reported-by: Maxime Bizon <mbizon@freebox.fr>
Cc: <stable@kernel.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d80e731e
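
For context before the diff: the wait-queue head involved here is sighand->signalfd_wqh, i.e. it lives inside the polling task's sighand_struct, which is why a freed sighand can leave eppoll_entry->whead dangling. The plain userspace sketch below (not part of the commit, purely illustrative, with minimal error handling) shows the ordinary signalfd-under-epoll setup whose register/unregister paths the patch hardens: EPOLL_CTL_ADD is what creates the eppoll_entry pointing at signalfd_wqh, and EPOLL_CTL_DEL/close() is what ends up in ep_unregister_pollwait().

/*
 * Illustrative userspace sketch (not part of the commit): a signalfd
 * watched by epoll.  EPOLL_CTL_ADD hooks an eppoll_entry onto the
 * caller's sighand->signalfd_wqh; EPOLL_CTL_DEL/close() tear it down
 * via ep_unregister_pollwait().
 */
#include <sys/signalfd.h>
#include <sys/epoll.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        sigset_t mask;
        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);

        /* Block SIGINT so it is delivered through the signalfd instead. */
        if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) {
                perror("sigprocmask");
                return 1;
        }

        int sfd = signalfd(-1, &mask, SFD_CLOEXEC);
        int epfd = epoll_create1(EPOLL_CLOEXEC);
        if (sfd == -1 || epfd == -1) {
                perror("signalfd/epoll_create1");
                return 1;
        }

        struct epoll_event ev = { .events = EPOLLIN, .data.fd = sfd };
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, sfd, &ev) == -1) {
                perror("epoll_ctl");
                return 1;
        }

        struct epoll_event out;
        if (epoll_wait(epfd, &out, 1, -1) == 1) {
                struct signalfd_siginfo si;
                if (read(sfd, &si, sizeof(si)) == sizeof(si))
                        printf("got signal %u via signalfd\n", si.ssi_signo);
        }

        epoll_ctl(epfd, EPOLL_CTL_DEL, sfd, NULL);
        close(epfd);
        close(sfd);
        return 0;
}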
fs/eventpoll.c

@@ -320,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p)
 	return !list_empty(p);
 }
 
+static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
+{
+	return container_of(p, struct eppoll_entry, wait);
+}
+
 /* Get the "struct epitem" from a wait queue pointer */
 static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
 {
@@ -467,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
 	put_cpu();
 }
 
+static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+{
+	wait_queue_head_t *whead;
+
+	rcu_read_lock();
+	/* If it is cleared by POLLFREE, it should be rcu-safe */
+	whead = rcu_dereference(pwq->whead);
+	if (whead)
+		remove_wait_queue(whead, &pwq->wait);
+	rcu_read_unlock();
+}
+
 /*
  * This function unregisters poll callbacks from the associated file
  * descriptor. Must be called with "mtx" held (or "epmutex" if called from
@@ -481,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
 		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
 
 		list_del(&pwq->llink);
-		remove_wait_queue(pwq->whead, &pwq->wait);
+		ep_remove_wait_queue(pwq);
 		kmem_cache_free(pwq_cache, pwq);
 	}
 }
@@ -842,9 +859,16 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	struct epitem *epi = ep_item_from_wait(wait);
 	struct eventpoll *ep = epi->ep;
 
-	/* the caller holds eppoll_entry->whead->lock */
-	if ((unsigned long)key & POLLFREE)
+	if ((unsigned long)key & POLLFREE) {
+		ep_pwq_from_wait(wait)->whead = NULL;
+		/*
+		 * whead = NULL above can race with ep_remove_wait_queue()
+		 * which can do another remove_wait_queue() after us, so we
+		 * can't use __remove_wait_queue(). whead->lock is held by
+		 * the caller.
+		 */
 		list_del_init(&wait->task_list);
+	}
 
 	spin_lock_irqsave(&ep->lock, flags);
fs/signalfd.c

@@ -33,7 +33,11 @@
 void signalfd_cleanup(struct sighand_struct *sighand)
 {
 	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
-
+	/*
+	 * The lockless check can race with remove_wait_queue() in progress,
+	 * but in this case its caller should run under rcu_read_lock() and
+	 * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
+	 */
 	if (likely(!waitqueue_active(wqh)))
 		return;