Commit 3a80e1fa authored by Alexander Duyck, committed by David S. Miller

ip6gre: Add support for GSO

This patch adds code borrowed from bits and pieces of other protocols to
the IPv6 GRE path so that we can support GSO over IPv6-based GRE tunnels.
By adding this support we are able to significantly improve the throughput
of GRE tunnels, as we are able to make use of GSO.

Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e0c20967
@@ -621,7 +621,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net *net = tunnel->net;
 	struct net_device *tdev;	/* Device to other host */
 	struct ipv6hdr *ipv6h;		/* Our new IP header */
-	unsigned int max_headroom = 0;	/* The extra header space needed */
+	unsigned int min_headroom = 0;	/* The extra header space needed */
 	int gre_hlen;
 	struct ipv6_tel_txoption opt;
 	int mtu;
@@ -629,7 +629,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net_device_stats *stats = &tunnel->dev->stats;
 	int err = -1;
 	u8 proto;
-	struct sk_buff *new_skb;
 	__be16 protocol;

 	if (dev->type == ARPHRD_ETHER)
@@ -672,14 +671,14 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	mtu = dst_mtu(dst) - sizeof(*ipv6h);
 	if (encap_limit >= 0) {
-		max_headroom += 8;
+		min_headroom += 8;
 		mtu -= 8;
 	}
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 	if (skb_dst(skb))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		*pmtu = mtu;
 		err = -EMSGSIZE;
 		goto tx_err_dst_release;
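
The !skb_is_gso(skb) test added above is what lets oversized packets through: a GSO skb legitimately exceeds the tunnel MTU, since it carries several segments' worth of payload that the GSO layer (or the NIC) will cut into MTU-sized frames after encapsulation. A minimal sketch of the pattern, with a hypothetical check_tunnel_mtu() helper standing in for the driver's error path:

/* Sketch only: how an encapsulating xmit path typically enforces the
 * tunnel MTU once GSO is supported. check_tunnel_mtu() is a
 * hypothetical stand-in for the -EMSGSIZE error path in the driver.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>

static int check_tunnel_mtu(struct sk_buff *skb, int mtu)
{
	/* skb_is_gso() is true when gso_size is set, i.e. the skb holds
	 * several segments' worth of payload that will be split up
	 * later, so its total length may exceed the path MTU.
	 */
	if (skb->len > mtu && !skb_is_gso(skb))
		return -EMSGSIZE;	/* genuinely too big, reject */

	return 0;			/* fits, or will be segmented */
}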
@@ -697,20 +696,19 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));

-	max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+	min_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;

-	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
-	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
-		new_skb = skb_realloc_headroom(skb, max_headroom);
-		if (max_headroom > dev->needed_headroom)
-			dev->needed_headroom = max_headroom;
-		if (!new_skb)
-			goto tx_err_dst_release;
-
-		if (skb->sk)
-			skb_set_owner_w(new_skb, skb->sk);
-		consume_skb(skb);
-		skb = new_skb;
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
+
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+				       0, GFP_ATOMIC);
+		if (min_headroom > dev->needed_headroom)
+			dev->needed_headroom = min_headroom;
+		if (unlikely(err))
+			goto tx_err_dst_release;
 	}

 	if (!fl6->flowi6_mark && ndst)
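
Here the replacement-skb dance (skb_realloc_headroom() plus skb_set_owner_w() and consume_skb()) is swapped for pskb_expand_head(), which reallocates the header portion of the existing skb in place. That keeps the skb's socket ownership and, importantly for GSO, its frag list intact. A condensed sketch of the pattern, assuming a hypothetical grow_headroom() wrapper:

/* Sketch only: growing headroom in place with pskb_expand_head(), as
 * the new code does. The extra 16 bytes and SKB_DATA_ALIGN() mirror
 * the patch; "needed" is whatever headroom the pushed headers require.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>

static int grow_headroom(struct sk_buff *skb, unsigned int needed)
{
	int delta = SKB_DATA_ALIGN(needed - skb_headroom(skb) + 16);

	/* max_t() clamps a negative delta to zero: even when headroom
	 * is already sufficient, pskb_expand_head() may still be needed
	 * to unshare a cloned header before we write into it.
	 */
	return pskb_expand_head(skb, max_t(int, delta, 0), 0, GFP_ATOMIC);
}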
@@ -723,10 +721,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}

-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	err = iptunnel_handle_offloads(skb,
+				       (tunnel->parms.o_flags & GRE_CSUM) ?
+				       SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+	if (err)
+		goto tx_err_dst_release;

 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
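
iptunnel_handle_offloads() replaces the open-coded inner-header setup and additionally tags GSO skbs with the tunnel GSO type, so the segmentation code later knows that a GRE header must be replicated onto every segment. Roughly what the helper does in kernels of this vintage, as a simplified sketch (the real code lives in net/ipv4/ip_tunnel_core.c; the checksum handling for the non-GSO case is omitted here):

/* Simplified, illustrative sketch of iptunnel_handle_offloads(). */
#include <linux/skbuff.h>

static int handle_offloads_sketch(struct sk_buff *skb, int gso_type_mask)
{
	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);	/* inner = current headers */
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);

		if (unlikely(err))
			return err;
		/* Tell the GSO layer which tunnel header to replicate
		 * onto each segment (and whether to fill its checksum).
		 */
		skb_shinfo(skb)->gso_type |= gso_type_mask;
	}

	return 0;
}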
@@ -760,7 +759,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		*ptr = tunnel->parms.o_key;
 		ptr--;
 	}
-	if (tunnel->parms.o_flags&GRE_CSUM) {
+	if ((tunnel->parms.o_flags & GRE_CSUM) &&
+	    !(skb_shinfo(skb)->gso_type &
+	      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
 		*ptr = 0;
 		*(__sum16 *)ptr = gre6_checksum(skb);
 	}
@@ -1559,9 +1560,18 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 	dev->features		|= GRE6_FEATURES;
 	dev->hw_features	|= GRE6_FEATURES;

-	/* Can use a lockless transmit, unless we generate output sequences */
-	if (!(nt->parms.o_flags & GRE_SEQ))
+	if (!(nt->parms.o_flags & GRE_SEQ)) {
+		/* TCP segmentation offload is not supported when we
+		 * generate output sequences.
+		 */
+		dev->features |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+		/* Can use a lockless transmit, unless we generate
+		 * output sequences
+		 */
 		dev->features |= NETIF_F_LLTX;
+	}

 	err = register_netdevice(dev);
 	if (err)
...
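
NETIF_F_GSO_SOFTWARE advertises the software GSO feature set on the tunnel device, so the stack can hand it TSO-sized skbs and fall back to software segmentation when nothing further down can do the work in hardware. It is deliberately gated on the absence of GRE_SEQ: when the tunnel generates output sequence numbers, every segment would need its own number, which segmentation at this layer cannot provide. As an illustrative check (device name and addresses are placeholders), one would create a tunnel without oseq via "ip link add gre1 type ip6gre local 2001:db8::1 remote 2001:db8::2", then inspect "ethtool -k gre1" and expect the segmentation offloads to be reported as on.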