Commit 1b5f962e authored by Craig Gallek, committed by David S. Miller

soreuseport: fix initialization race

Syzkaller stumbled upon a way to trigger
WARNING: CPU: 1 PID: 13881 at net/core/sock_reuseport.c:41
reuseport_alloc+0x306/0x3b0 net/core/sock_reuseport.c:39

There are two initialization paths for the sock_reuseport structure in a
socket: Through the udp/tcp bind paths of SO_REUSEPORT sockets or through
SO_ATTACH_REUSEPORT_[CE]BPF before bind.  The existing implementation
assumed that the socket lock protected both of these paths when it actually
only protects the SO_ATTACH_REUSEPORT path.  Syzkaller triggered this
double allocation by running these paths concurrently.
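
Concretely, both callers open-coded the same unsynchronized check before calling
reuseport_alloc() (these are the lines the hunks below remove); nothing serializes
this check against the SO_ATTACH_REUSEPORT path, so both paths can observe a NULL
sk_reuseport_cb and proceed to allocate:

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))	/* check: no common lock held */
		return reuseport_alloc(sk);		/* act: racy second allocation */
	return 0;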

This patch moves the check for double allocation into the reuseport_alloc
function which is protected by a global spin lock.
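
For intuition only, here is a minimal, hypothetical userspace analogue of the fixed
pattern; a pthread mutex stands in for reuseport_lock, and the names cb, cb_alloc()
and path() are invented for this sketch (build with gcc -pthread):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void *cb;		/* stands in for sk->sk_reuseport_cb */
	static int allocations;		/* counts real allocations */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int cb_alloc(void)
	{
		pthread_mutex_lock(&lock);
		/* The check lives under the same lock as the allocation,
		 * so a caller that lost the race simply bails out.
		 */
		if (cb) {
			pthread_mutex_unlock(&lock);
			return 0;
		}
		cb = malloc(64);
		if (cb)
			allocations++;
		pthread_mutex_unlock(&lock);
		return cb ? 0 : -1;
	}

	static void *path(void *arg)
	{
		cb_alloc();	/* bind/hash and setsockopt paths both end up here */
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, path, NULL);
		pthread_create(&t2, NULL, path, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		printf("allocations = %d\n", allocations);	/* always 1 */
		return 0;
	}

Because the NULL check and the allocation sit under the same lock, the loser of
the race bails out instead of allocating a second time.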

Fixes: e32ea7e7 ("soreuseport: fast reuseport UDP socket selection")
Fixes: c125e80b ("soreuseport: fast reuseport TCP socket selection")
Signed-off-by: Craig Gallek <kraig@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 66c54517
net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
 	 * soft irq of receive path or setsockopt from process context
 	 */
 	spin_lock_bh(&reuseport_lock);
-	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-					    lockdep_is_held(&reuseport_lock)),
-		  "multiple allocations for the same socket");
+
+	/* Allocation attempts can occur concurrently via the setsockopt path
+	 * and the bind/hash path.  Nothing to do when we lose the race.
+	 */
+	if (rcu_dereference_protected(sk->sk_reuseport_cb,
+				      lockdep_is_held(&reuseport_lock)))
+		goto out;
+
 	reuse = __reuseport_alloc(INIT_SOCKS);
 	if (!reuse) {
 		spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
 	reuse->num_socks = 1;
 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
 	spin_unlock_bh(&reuseport_lock);
 
 	return 0;
net/ipv4/inet_hashtables.c
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
 			return reuseport_add_sock(sk, sk2);
 	}
 
-	/* Initial allocation may have already happened via setsockopt */
-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
-		return reuseport_alloc(sk);
-	return 0;
+	return reuseport_alloc(sk);
 }
 
 int __inet_hash(struct sock *sk, struct sock *osk)
net/ipv4/udp.c
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
 		}
 	}
 
-	/* Initial allocation may have already happened via setsockopt */
-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
-		return reuseport_alloc(sk);
-	return 0;
+	return reuseport_alloc(sk);
 }
 
 /**