Commit 686a7e32 authored by Eric Dumazet, committed by David S. Miller

inetpeer: fix race in unused_list manipulations

Several crashes in cleanup_once() were reported in recent kernels.

Commit d6cc1d64 (inetpeer: various changes) added a race in
unlink_from_unused().

One way to avoid taking unused_peers.lock before doing the list_empty()
test is to catch 0->1 refcnt transitions, using the full-barrier atomic
operation variants (atomic_cmpxchg() and atomic_inc_return()) instead of
the previous atomic_inc() and atomic_add_unless() variants.

We then call unlink_from_unused() only for the owner of the 0->1
transition.

Add a new atomic_add_unless_return() static helper.
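
For context, here is a minimal userspace sketch of the helper's semantics,
using C11 atomics instead of the kernel's atomic_t API. The names below
(add_unless_return and the demo in main()) are illustrative only and are not
part of this patch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the new helper: add 'a' to *ptr unless it equals
 * 'u', and report the post-add value through *newv so the caller can detect
 * the 0->1 transition. */
static bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
{
	int old = atomic_load(ptr);

	while (old != u) {
		*newv = old + a;
		/* On failure, compare_exchange reloads the current value into 'old'. */
		if (atomic_compare_exchange_weak(ptr, &old, *newv))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int refcnt = 0;
	int newref = 0;

	/* Only the caller that performs the 0->1 transition would unlink the
	 * entry from the unused list in the real code. */
	if (add_unless_return(&refcnt, 1, -1, &newref) && newref == 1)
		printf("0->1 transition owner: refcnt is now %d\n", newref);
	return 0;
}

In the patch itself, only the caller that moved the refcnt from 0 to 1 calls
unlink_from_unused(), which now unconditionally takes unused_peers.lock, so
the racy lockless list_empty() test is gone.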

With help from Arun Sharma.

Refs: https://bugzilla.kernel.org/show_bug.cgi?id=32772
Reported-by: Arun Sharma <asharma@fb.com>
Reported-by: Maximilian Engelhardt <maxi@daemonizer.de>
Reported-by: Yann Dupont <Yann.Dupont@univ-nantes.fr>
Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e7a46b4d
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	if (!list_empty(&p->unused)) {
-		spin_lock_bh(&unused_peers.lock);
-		list_del_init(&p->unused);
-		spin_unlock_bh(&unused_peers.lock);
-	}
+	spin_lock_bh(&unused_peers.lock);
+	list_del_init(&p->unused);
+	spin_unlock_bh(&unused_peers.lock);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
 	u;							\
 })
 
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+	int cur, old = atomic_read(ptr);
+
+	while (old != u) {
+		*newv = old + a;
+		cur = atomic_cmpxchg(ptr, old, *newv);
+		if (cur == old)
+			return true;
+		old = cur;
+	}
+	return false;
+}
+
 /*
  * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-				    struct inet_peer_base *base)
+				    struct inet_peer_base *base,
+				    int *newrefcnt)
 {
 	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 			 * distinction between an unused entry (refcnt=0) and
 			 * a freed one.
 			 */
-			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+			if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
 				u = NULL;
 			return u;
 		}
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
-	int invalidated;
+	int invalidated, newrefcnt = 0;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu(daddr, base);
+	p = lookup_rcu(daddr, base, &newrefcnt);
 	invalidated = read_seqretry(&base->lock, sequence);
 	rcu_read_unlock();
 
 	if (p) {
-		/* The existing node has been found.
+found:		/* The existing node has been found.
 		 * Remove the entry from unused list if it was there.
 		 */
-		unlink_from_unused(p);
+		if (newrefcnt == 1)
+			unlink_from_unused(p);
 		return p;
 	}
 
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	write_seqlock_bh(&base->lock);
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		newrefcnt = atomic_inc_return(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
-		/* Remove the entry from unused list if it was there. */
-		unlink_from_unused(p);
-		return p;
+		goto found;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
...