Commit 6bd4f355 authored by Eric Dumazet, committed by David S. Miller

ipv6: kill sk_dst_lock

While testing the np->opt RCU conversion, I found that UDP/IPv6 was
using a mixture of xchg() and sk_dst_lock to protect concurrent changes
to sk->sk_dst_cache, leading to possible corruptions and crashes.

ip6_sk_dst_lookup_flow() uses sk_dst_check() anyway, so the simplest
way to fix the mess is to remove sk_dst_lock completely, as we did for
IPv4.
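
(For context, the read side was already lockless: below is a simplified sketch of the sk_dst_get()/sk_dst_check() pattern, paraphrased from the generic sock helpers of that era rather than taken from this patch. Readers take a reference under RCU and validate the cached entry against its cookie, so they never needed sk_dst_lock.)

/* Illustrative sketch only, simplified from include/net/sock.h and
 * net/core/sock.c around this time; not part of the patch below.
 */
static inline struct dst_entry *sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	/* Only keep the entry if we can still take a reference on it */
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	/* Drop the cached route if it became obsolete for this cookie */
	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}
	return dst;
}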

__ip6_dst_store() and ip6_dst_store() then share the same implementation,
so they are merged into a single ip6_dst_store().

Since sk_setup_caps() can be called with or without the socket lock held,
we have to use sk_dst_set() instead of __sk_dst_set().
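
For reference, the difference between the two setters, again as a simplified sketch of the helpers in include/net/sock.h at that time and not code added by this patch: __sk_dst_set() assumes the caller serializes writers (socket lock held), while sk_dst_set() publishes the new route with an atomic pointer exchange and is safe without it.

/* Illustrative sketch only, simplified; not part of the patch below. */
static inline void __sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	/* Caller must hold the socket lock: plain RCU pointer update */
	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	/* Lockless: atomically swap in the new dst, release the old one */
	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}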

Note that I had to move the "np->dst_cookie = rt6_get_cookie(rt);"
assignment in ip6_dst_store() to before the sk_setup_caps(sk, dst) call.

This is because ip6_dst_store() can be called from process context,
without any lock held.

As soon as the dst is installed in sk->sk_dst_cache, the dst can be freed
by another cpu doing a concurrent ip6_dst_store().

Dereferencing the dst before installing it is needed to make sure no
use-after-free can trigger.
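
Concretely, the consolidated helper (see the include/net/ip6_route.h hunk below) now reads the route cookie while the caller's reference still pins the dst, and only then publishes it:

static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
				 const struct in6_addr *daddr,
				 const struct in6_addr *saddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	/* Read the cookie before publishing: once sk_setup_caps() installs
	 * dst in sk->sk_dst_cache, a concurrent ip6_dst_store() on another
	 * cpu may release it.
	 */
	np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
	sk_setup_caps(sk, dst);
	np->daddr_cache = daddr;
#ifdef CONFIG_IPV6_SUBTREES
	np->saddr_cache = saddr;
#endif
}
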
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c836a8ba
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
 /*
  *	Store a destination cache entry in a socket
  */
-static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
-				   const struct in6_addr *daddr,
-				   const struct in6_addr *saddr)
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+				 const struct in6_addr *daddr,
+				 const struct in6_addr *saddr)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct rt6_info *rt = (struct rt6_info *) dst;
 
+	np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
 	sk_setup_caps(sk, dst);
 	np->daddr_cache = daddr;
 #ifdef CONFIG_IPV6_SUBTREES
 	np->saddr_cache = saddr;
 #endif
-	np->dst_cookie = rt6_get_cookie(rt);
-}
-
-static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
-				 struct in6_addr *daddr, struct in6_addr *saddr)
-{
-	spin_lock(&sk->sk_dst_lock);
-	__ip6_dst_store(sk, dst, daddr, saddr);
-	spin_unlock(&sk->sk_dst_lock);
 }
 
 static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -254,7 +254,6 @@ struct cg_proto;
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early demux
  *	@sk_dst_cache: destination cache
- *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
@@ -393,7 +392,7 @@ struct sock {
 #endif
 	struct dst_entry	*sk_rx_dst;
 	struct dst_entry __rcu	*sk_dst_cache;
-	spinlock_t		sk_dst_lock;
+	/* Note: 32bit hole on 64bit arches */
 	atomic_t		sk_wmem_alloc;
 	atomic_t		sk_omem_alloc;
 	int			sk_sndbuf;
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1530,7 +1530,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		skb_queue_head_init(&newsk->sk_receive_queue);
 		skb_queue_head_init(&newsk->sk_write_queue);
 
-		spin_lock_init(&newsk->sk_dst_lock);
 		rwlock_init(&newsk->sk_callback_lock);
 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
 				af_callback_keys + newsk->sk_family,
@@ -1607,7 +1606,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
 	u32 max_segs = 1;
 
-	__sk_dst_set(sk, dst);
+	sk_dst_set(sk, dst);
 	sk->sk_route_caps = dst->dev->features;
 	if (sk->sk_route_caps & NETIF_F_GSO)
 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -2388,7 +2387,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	} else
 		sk->sk_wq	= NULL;
 
-	spin_lock_init(&sk->sk_dst_lock);
 	rwlock_init(&sk->sk_callback_lock);
 	lockdep_set_class_and_name(&sk->sk_callback_lock,
 			af_callback_keys + sk->sk_family,
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -459,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 	 * comment in that function for the gory details. -acme
 	 */
 
-	__ip6_dst_store(newsk, dst, NULL, NULL);
+	ip6_dst_store(newsk, dst, NULL, NULL);
 	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
 						      NETIF_F_TSO);
 	newdp6 = (struct dccp6_sock *)newsk;
@@ -883,7 +883,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	np->saddr = *saddr;
 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
-	__ip6_dst_store(sk, dst, NULL, NULL);
+	ip6_dst_store(sk, dst, NULL, NULL);
 
 	icsk->icsk_ext_hdr_len = 0;
 	if (opt)
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -673,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 			return PTR_ERR(dst);
 		}
 
-		__ip6_dst_store(sk, dst, NULL, NULL);
+		ip6_dst_store(sk, dst, NULL, NULL);
 	}
 
 	return 0;
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 }
 
-/*
- * Special lock-class for __icmpv6_sk:
- */
-static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
-
 static int __net_init icmpv6_sk_init(struct net *net)
 {
 	struct sock *sk;
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net)
 		net->ipv6.icmp_sk[i] = sk;
 
-		/*
-		 * Split off their lock-class, because sk->sk_dst_lock
-		 * gets used from softirqs, which is safe for
-		 * __icmpv6_sk (because those never get directly used
-		 * via userspace syscalls), but unsafe for normal sockets.
-		 */
-		lockdep_set_class(&sk->sk_dst_lock,
-				  &icmpv6_socket_sk_dst_lock_key);
-
 		/* Enough space for 2 64K ICMP packets, including
 		 * sk_buff struct overhead.
 		 */
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -110,14 +110,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 }
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
-static inline
-void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
-			   const struct in6_addr *daddr,
-			   const struct in6_addr *saddr)
-{
-	__ip6_dst_store(sk, dst, daddr, saddr);
-}
-
 static inline
 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 {
@@ -153,7 +145,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
 		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
 
 		if (!IS_ERR(dst))
-			__inet6_csk_dst_store(sk, dst, NULL, NULL);
+			ip6_dst_store(sk, dst, NULL, NULL);
 	}
 	return dst;
 }
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -257,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 	sk->sk_gso_type = SKB_GSO_TCPV6;
-	__ip6_dst_store(sk, dst, NULL, NULL);
+	ip6_dst_store(sk, dst, NULL, NULL);
 
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp &&
@@ -1060,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	 */
 
 	newsk->sk_gso_type = SKB_GSO_TCPV6;
-	__ip6_dst_store(newsk, dst, NULL, NULL);
+	ip6_dst_store(newsk, dst, NULL, NULL);
 	inet6_sk_rx_dst_set(newsk, skb);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;