Commit 38ca2d39 authored by Paolo Abeni, committed by Greg Kroah-Hartman

udp6: set rx_dst_cookie on rx_dst updates


[ Upstream commit 64f0f5d1 ]

Currently, in the udp6 code, the dst cookie is not initialized/updated
together with the RX dst used by early demux.

As a result, dst_check() in the early-demux path always fails, the rx dst
cache is always invalidated, and the demux lookup yields no real gain.

Fix it by adding a udp6-specific variant of sk_rx_dst_set() and using it
to set the dst cookie when the dst entry really changes.

The issue has been present since the introduction of early demux for IPv6.
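
For context (an illustrative aside, not part of the patch): the cookie is how
dst_check() decides whether a cached route is still trustworthy. Below is a
simplified paraphrase of that contract; the real inline lives in
include/net/dst.h, and for IPv6 routes the ->check() hook is ip6_dst_check(),
which compares the cookie against the value rt6_get_cookie() reports. The
helper name is invented for illustration:

    /* Simplified paraphrase of the dst_check() contract: hand back the
     * cached dst only if the caller's cookie still validates it, else NULL.
     */
    static inline struct dst_entry *dst_check_sketch(struct dst_entry *dst,
                                                     u32 cookie)
    {
            if (dst->obsolete)
                    dst = dst->ops->check(dst, cookie);
            return dst;
    }

Since udp6 never wrote rx_dst_cookie, the comparison never matched, and the
sk_rx_dst entry cached by early demux was dropped again on the next packet.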

Fixes: 5425077d ("net: ipv6: Add early demux handler for UDP unicast")
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b4426cf2
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -265,7 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 }
 
 void udp_v4_early_demux(struct sk_buff *skb);
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
 		 int (*saddr_cmp)(const struct sock *,
 				  const struct sock *));
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,13 +1762,14 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old;
 
 	dst_hold(dst);
 	old = xchg(&sk->sk_rx_dst, dst);
 	dst_release(old);
+	return old != dst;
 }
 
 /*
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -752,6 +752,15 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	return 0;
 }
 
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	if (udp_sk_rx_dst_set(sk, dst)) {
+		const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+	}
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		   int proto)
 {
@@ -801,7 +810,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		int ret;
 
 		if (unlikely(sk->sk_rx_dst != dst))
-			udp_sk_rx_dst_set(sk, dst);
+			udp6_sk_rx_dst_set(sk, dst);
 
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
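
For reference, a condensed, hypothetical sketch of the consumer side that
benefits from this change: the cached-route fast path in udp_v6_early_demux(),
which the patch does not touch. With rx_dst_cookie now refreshed whenever the
cached dst changes, dst_check() can succeed and the route is reused instead of
triggering a fresh lookup for every packet. The function name below is
invented for illustration:

    static void udp6_reuse_cached_dst_sketch(struct sock *sk,
                                             struct sk_buff *skb)
    {
            /* Read the dst cached by a previous __udp6_lib_rcv() pass. */
            struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

            if (dst)
                    dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
            if (dst)
                    /* Borrow the socket's reference rather than taking a new one. */
                    skb_dst_set_noref(skb, dst);
    }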