Commit 5453205c authored by Alexander Duyck, committed by Jeff Kirsher

i40e/i40evf: Enable support for SKB_GSO_UDP_TUNNEL_CSUM

The XL722 has support for providing the outer UDP tunnel checksum on
transmits.  Make use of this feature to support segmenting UDP tunnels with
outer checksums enabled.
Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent fad57330
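
Editor's note: the non-obvious part of the change is the checksum fix-up in i40e_tso(). For SKB_GSO_UDP_TUNNEL_CSUM skbs the stack leaves the outer UDP check field seeded with the folded pseudo-header checksum, which still accounts for the length of the whole GSO super-packet, so the driver subtracts everything past the outer transport header (in ones'-complement arithmetic) before handing the frame to hardware. Below is a minimal stand-alone sketch of that arithmetic, not driver code: the addresses and lengths are made up, and it works in host byte order for clarity, whereas the driver operates on the big-endian field directly via its ntohs(1) trick.

/*
 * Stand-alone sketch of the "remove payload length from outer checksum"
 * step in i40e_tso() -- not driver code.  Values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit ones'-complement accumulator down to 16 bits */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* hypothetical pseudo-header pieces: 192.168.0.1 -> 192.168.0.2, UDP */
	uint32_t addrs = 0xc0a8 + 0x0001 + 0xc0a8 + 0x0002;
	uint32_t proto = 0x0011;
	uint32_t full_len = 9000;	/* UDP length of the GSO super-packet */

	/* seed the stack leaves in the outer check field: folded pseudo-header
	 * sum that still includes the super-packet length */
	uint16_t check = csum_fold16(addrs + proto + full_len);

	/* what the driver does: add the ones'-complement of the length and
	 * fold again, i.e. subtract the length in ones'-complement math */
	uint16_t adjusted = csum_fold16((uint32_t)check + (uint16_t)~full_len);

	/* same result as computing the pseudo-header sum without the length,
	 * which is the seed the hardware needs for per-segment checksums */
	uint16_t expected = csum_fold16(addrs + proto);

	printf("check=0x%04x adjusted=0x%04x expected=0x%04x\n",
	       check, adjusted, expected);
	return 0;
}

Folding in the ones'-complement of the length is equivalent to subtracting it, so adjusted and expected print the same value (0x8165 for these inputs).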
drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -2272,6 +2272,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -2298,7 +2299,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		ip.v6->payload_len = 0;
 	}
 
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE)) {
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+			/* determine offset of outer transport header */
+			l4_offset = l4.hdr - skb->data;
+
+			/* remove payload length from outer checksum */
+			paylen = (__force u16)l4.udp->check;
+			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+			l4.udp->check = ~csum_fold((__force __wsum)paylen);
+		}
+
 		/* reset pointers to inner headers */
 		ip.hdr = skb_inner_network_header(skb);
 		l4.hdr = skb_inner_transport_header(skb);
@@ -2460,6 +2472,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
+		/* indicate if we need to offload outer UDP header */
+		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
 		/* record tunnel offload values */
 		*cd_tunneling |= tunnel;
drivers/net/ethernet/intel/i40evf/i40e_txrx.c

@@ -1532,6 +1532,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -1558,7 +1559,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		ip.v6->payload_len = 0;
 	}
 
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE)) {
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+			/* determine offset of outer transport header */
+			l4_offset = l4.hdr - skb->data;
+
+			/* remove payload length from outer checksum */
+			paylen = (__force u16)l4.udp->check;
+			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+			l4.udp->check = ~csum_fold((__force __wsum)paylen);
+		}
+
 		/* reset pointers to inner headers */
 		ip.hdr = skb_inner_network_header(skb);
 		l4.hdr = skb_inner_transport_header(skb);
@@ -1678,6 +1690,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
+		/* indicate if we need to offload outer UDP header */
+		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
 		/* record tunnel offload values */
 		*cd_tunneling |= tunnel;
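
Editor's note: this TSO path only sees un-segmented skbs when the netdev also advertises outer-UDP-checksum GSO; otherwise the stack falls back to software segmentation before the driver is reached. A hedged sketch of how a driver typically advertises the feature follows; the helper function is hypothetical and not part of this commit, while the NETIF_F_* flags and the hw_enc_features field are existing kernel symbols.

#include <linux/netdevice.h>

/* Hypothetical probe-time helper, for illustration only. */
static void example_enable_tunnel_csum_gso(struct net_device *netdev)
{
	/* offloads usable on encapsulated traffic */
	netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
				   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	/* and expose them as regular device features as well */
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL |
			    NETIF_F_GSO_UDP_TUNNEL_CSUM;
}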