Commit a09a4c8d authored by Jesse Gross's avatar Jesse Gross Committed by David S. Miller

tunnels: Remove encapsulation offloads on decap.

If a packet is either locally encapsulated or processed through GRO
it is marked with the offloads that it requires. However, when it is
decapsulated these tunnel offload indications are not removed. This
means that if we receive an encapsulated TCP packet, aggregate it with
GRO, decapsulate, and retransmit the resulting frame on a NIC that does
not support encapsulation, we won't be able to take advantage of hardware
offloads even though it is just a simple TCP packet at this point.

This fixes the problem by stripping off encapsulation offload indications
when packets are decapsulated.

The performance impacts of this bug are significant. In a test where a
Geneve encapsulated TCP stream is sent to a hypervisor, GRO'ed, decapsulated,
and bridged to a VM performance is improved by 60% (5Gbps->8Gbps) as a
result of avoiding unnecessary segmentation at the VM tap interface.
Reported-by: Ramu Ramamurthy <sramamur@linux.vnet.ibm.com>
Fixes: 68c33163 ("v4 GRE: Add TCP segmentation offload for GRE")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fac8e0f5
...@@ -305,6 +305,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, ...@@ -305,6 +305,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
/* Strip tunnel encapsulation offload state after decapsulation.
 *
 * A packet that was locally encapsulated or aggregated by GRO carries
 * GSO tunnel offload flags describing its outer headers.  Once the
 * tunnel header has been pulled those flags no longer match the packet,
 * and leaving them set can prevent later devices from applying hardware
 * offloads to what is now a plain inner packet.
 *
 * Returns 0 on success, or a negative errno if the skb could not be
 * uncloned to make its shared info safely writable.
 */
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);

		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{ {
if (pkt_len > 0) { if (pkt_len > 0) {
......
...@@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(struct sock *sk) ...@@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(struct sock *sk)
return sk->sk_user_data; return sk->sk_user_data;
} }
static void fou_recv_pull(struct sk_buff *skb, size_t len) static int fou_recv_pull(struct sk_buff *skb, size_t len)
{ {
struct iphdr *iph = ip_hdr(skb); struct iphdr *iph = ip_hdr(skb);
...@@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff *skb, size_t len) ...@@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff *skb, size_t len)
__skb_pull(skb, len); __skb_pull(skb, len);
skb_postpull_rcsum(skb, udp_hdr(skb), len); skb_postpull_rcsum(skb, udp_hdr(skb), len);
skb_reset_transport_header(skb); skb_reset_transport_header(skb);
return iptunnel_pull_offloads(skb);
} }
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
...@@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) ...@@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
if (!fou) if (!fou)
return 1; return 1;
fou_recv_pull(skb, sizeof(struct udphdr)); if (fou_recv_pull(skb, sizeof(struct udphdr)))
goto drop;
return -fou->protocol; return -fou->protocol;
drop:
kfree_skb(skb);
return 0;
} }
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
...@@ -170,6 +176,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) ...@@ -170,6 +176,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
__skb_pull(skb, sizeof(struct udphdr) + hdrlen); __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb_reset_transport_header(skb); skb_reset_transport_header(skb);
if (iptunnel_pull_offloads(skb))
goto drop;
return -guehdr->proto_ctype; return -guehdr->proto_ctype;
drop: drop:
......
...@@ -114,7 +114,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, ...@@ -114,7 +114,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
skb->vlan_tci = 0; skb->vlan_tci = 0;
skb_set_queue_mapping(skb, 0); skb_set_queue_mapping(skb, 0);
skb_scrub_packet(skb, xnet); skb_scrub_packet(skb, xnet);
return 0;
return iptunnel_pull_offloads(skb);
} }
EXPORT_SYMBOL_GPL(iptunnel_pull_header); EXPORT_SYMBOL_GPL(iptunnel_pull_header);
......
...@@ -681,14 +681,16 @@ static int ipip6_rcv(struct sk_buff *skb) ...@@ -681,14 +681,16 @@ static int ipip6_rcv(struct sk_buff *skb)
skb->mac_header = skb->network_header; skb->mac_header = skb->network_header;
skb_reset_network_header(skb); skb_reset_network_header(skb);
IPCB(skb)->flags = 0; IPCB(skb)->flags = 0;
skb->protocol = htons(ETH_P_IPV6); skb->dev = tunnel->dev;
if (packet_is_spoofed(skb, iph, tunnel)) { if (packet_is_spoofed(skb, iph, tunnel)) {
tunnel->dev->stats.rx_errors++; tunnel->dev->stats.rx_errors++;
goto out; goto out;
} }
__skb_tunnel_rx(skb, tunnel->dev, tunnel->net); if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
!net_eq(tunnel->net, dev_net(tunnel->dev))))
goto out;
err = IP_ECN_decapsulate(iph, skb); err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) { if (unlikely(err)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment