Commit 2a04c7ba authored by David S. Miller

Merge branch 'vxlan-rx-cleanups'

Jiri Benc says:

====================
vxlan: consolidate rx handling

Currently, vxlan_rcv is just called at the end of vxlan_udp_encap_recv,
continuing the rx processing where vxlan_udp_encap_recv left it. There's no
clear border between those two functions. This patchset moves
vxlan_udp_encap_recv and vxlan_rcv into a single function.

This also allows some simplification in the error path.

The VXLAN-GPE implementation that will follow up this set can be seen at:
https://github.com/jbenc/linux-vxlan/commits/master
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 39661e2c 10a5af23
...@@ -1164,16 +1164,18 @@ static bool vxlan_remcsum(struct vxlanhdr *unparsed, ...@@ -1164,16 +1164,18 @@ static bool vxlan_remcsum(struct vxlanhdr *unparsed,
} }
static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
struct vxlan_metadata *md, struct sk_buff *skb, u32 vxflags,
struct metadata_dst *tun_dst) struct vxlan_metadata *md)
{ {
struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
struct metadata_dst *tun_dst;
if (!(unparsed->vx_flags & VXLAN_HF_GBP)) if (!(unparsed->vx_flags & VXLAN_HF_GBP))
goto out; goto out;
md->gbp = ntohs(gbp->policy_id); md->gbp = ntohs(gbp->policy_id);
tun_dst = (struct metadata_dst *)skb_dst(skb);
if (tun_dst) if (tun_dst)
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
...@@ -1183,19 +1185,18 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, ...@@ -1183,19 +1185,18 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
if (gbp->policy_applied) if (gbp->policy_applied)
md->gbp |= VXLAN_GBP_POLICY_APPLIED; md->gbp |= VXLAN_GBP_POLICY_APPLIED;
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
out: out:
unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
} }
static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct sk_buff *skb, struct vxlan_metadata *md, struct vxlan_sock *vs,
struct metadata_dst *tun_dst) struct sk_buff *skb)
{ {
struct iphdr *oip = NULL;
struct ipv6hdr *oip6 = NULL;
struct pcpu_sw_netstats *stats;
union vxlan_addr saddr; union vxlan_addr saddr;
int err = 0;
skb_reset_mac_header(skb); skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev); skb->protocol = eth_type_trans(skb, vxlan->dev);
...@@ -1203,82 +1204,60 @@ static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, ...@@ -1203,82 +1204,60 @@ static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs,
/* Ignore packet loops (and multicast echo) */ /* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
goto drop; return false;
/* Get data from the outer IP header */ /* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) { if (vxlan_get_sk_family(vs) == AF_INET) {
oip = ip_hdr(skb); saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
saddr.sin.sin_addr.s_addr = oip->saddr;
saddr.sa.sa_family = AF_INET; saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
} else { } else {
oip6 = ipv6_hdr(skb); saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
saddr.sin6.sin6_addr = oip6->saddr;
saddr.sa.sa_family = AF_INET6; saddr.sa.sa_family = AF_INET6;
#endif #endif
} }
if (tun_dst) {
skb_dst_set(skb, (struct dst_entry *)tun_dst);
tun_dst = NULL;
}
if ((vxlan->flags & VXLAN_F_LEARN) && if ((vxlan->flags & VXLAN_F_LEARN) &&
vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
goto drop; return false;
skb_reset_network_header(skb);
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
if (oip6)
err = IP6_ECN_decapsulate(oip6, skb);
if (oip)
err = IP_ECN_decapsulate(oip, skb);
if (unlikely(err)) {
if (log_ecn_error) {
if (oip6)
net_info_ratelimited("non-ECT from %pI6\n",
&oip6->saddr);
if (oip)
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&oip->saddr, oip->tos);
}
if (err > 1) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
goto drop;
}
}
stats = this_cpu_ptr(vxlan->dev->tstats); return true;
u64_stats_update_begin(&stats->syncp); }
stats->rx_packets++;
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
gro_cells_receive(&vxlan->gro_cells, skb); static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
struct sk_buff *skb)
{
int err = 0;
return; if (vxlan_get_sk_family(vs) == AF_INET)
drop: err = IP_ECN_decapsulate(oiph, skb);
if (tun_dst) #if IS_ENABLED(CONFIG_IPV6)
dst_release((struct dst_entry *)tun_dst); else
err = IP6_ECN_decapsulate(oiph, skb);
#endif
/* Consume bad packet */ if (unlikely(err) && log_ecn_error) {
kfree_skb(skb); if (vxlan_get_sk_family(vs) == AF_INET)
net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
((struct iphdr *)oiph)->tos);
else
net_info_ratelimited("non-ECT from %pI6\n",
&((struct ipv6hdr *)oiph)->saddr);
}
return err <= 1;
} }
/* Callback from net/ipv4/udp.c to receive packets */ /* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{ {
struct metadata_dst *tun_dst = NULL; struct pcpu_sw_netstats *stats;
struct vxlan_dev *vxlan; struct vxlan_dev *vxlan;
struct vxlan_sock *vs; struct vxlan_sock *vs;
struct vxlanhdr unparsed; struct vxlanhdr unparsed;
struct vxlan_metadata _md; struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md; struct vxlan_metadata *md = &_md;
void *oiph;
/* Need Vxlan and inner Ethernet header to be present */ /* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN)) if (!pskb_may_pull(skb, VXLAN_HLEN))
...@@ -1310,6 +1289,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ...@@ -1310,6 +1289,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (vxlan_collect_metadata(vs)) { if (vxlan_collect_metadata(vs)) {
__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
struct metadata_dst *tun_dst;
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
vxlan_vni_to_tun_id(vni), sizeof(*md)); vxlan_vni_to_tun_id(vni), sizeof(*md));
...@@ -1318,6 +1298,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ...@@ -1318,6 +1298,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop; goto drop;
md = ip_tunnel_info_opts(&tun_dst->u.tun_info); md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
skb_dst_set(skb, (struct dst_entry *)tun_dst);
} else { } else {
memset(md, 0, sizeof(*md)); memset(md, 0, sizeof(*md));
} }
...@@ -1329,7 +1311,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ...@@ -1329,7 +1311,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!vxlan_remcsum(&unparsed, skb, vs->flags)) if (!vxlan_remcsum(&unparsed, skb, vs->flags))
goto drop; goto drop;
if (vs->flags & VXLAN_F_GBP) if (vs->flags & VXLAN_F_GBP)
vxlan_parse_gbp_hdr(&unparsed, md, tun_dst); vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
if (unparsed.vx_flags || unparsed.vx_vni) { if (unparsed.vx_flags || unparsed.vx_vni) {
/* If there are any unprocessed flags remaining treat /* If there are any unprocessed flags remaining treat
...@@ -1343,13 +1325,28 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) ...@@ -1343,13 +1325,28 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop; goto drop;
} }
vxlan_rcv(vxlan, vs, skb, md, tun_dst); if (!vxlan_set_mac(vxlan, vs, skb))
goto drop;
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
goto drop;
}
stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
gro_cells_receive(&vxlan->gro_cells, skb);
return 0; return 0;
drop: drop:
if (tun_dst)
dst_release((struct dst_entry *)tun_dst);
/* Consume bad packet */ /* Consume bad packet */
kfree_skb(skb); kfree_skb(skb);
return 0; return 0;
...@@ -2648,7 +2645,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, ...@@ -2648,7 +2645,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
/* Mark socket as an encapsulation socket. */ /* Mark socket as an encapsulation socket. */
tunnel_cfg.sk_user_data = vs; tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1; tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_udp_encap_recv; tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_destroy = NULL; tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg); setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment