Commit e7e9956d authored by David S. Miller

Merge branch 'gso-checksums'

Alexander Duyck says:

====================
Add GSO support for outer checksum w/ inner checksum offloads

This patch series updates the existing segmentation offload code for
tunnels to make better use of existing and updated GSO checksum
computation.  This is done primarily through two mechanisms.  First, we
maintain a separate checksum in the GSO context block of the sk_buff.  This
allows us to maintain two checksum values, one offloaded with values stored
in csum_start and csum_offset, and one computed and tracked in
SKB_GSO_CB(skb)->csum.  By maintaining these two values we are able to take
advantage of the same sort of math used in local checksum offload so that
we can provide both inner and outer checksums with minimal overhead.
Second, the tunnel segmentation paths (GRE and UDP) are updated to consume
the value tracked in the GSO context block when writing the outer checksum.
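
(To make the local-checksum-offload math concrete: one's-complement sums
are associative, so the outer checksum can be folded from the header bytes
plus the already-tracked payload sum, with no second pass over the payload.
Below is a minimal userspace sketch; csum_partial()/csum_fold() are toy
reimplementations of the kernel helpers, and the buffer contents are made
up for illustration.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-bit one's-complement sum over buf, seeded with a prior partial sum. */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)				/* pad a trailing odd byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Fold to 16 bits and complement, as the kernel's csum_fold() does. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t hdr[8] = { 0x45, 0x00, 0x00, 0x13, 0xab, 0xcd, 0x00, 0x00 };
	uint8_t payload[11] = "hello world";
	uint8_t whole[sizeof(hdr) + sizeof(payload)];

	/* Track the payload sum once, like SKB_GSO_CB(skb)->csum. */
	uint32_t payload_sum = csum_partial(payload, sizeof(payload), 0);

	/* Outer checksum: fold header bytes plus the stored payload sum. */
	uint16_t fast = csum_fold(csum_partial(hdr, sizeof(hdr), payload_sum));

	/* Reference: one pass over headers and payload together. */
	memcpy(whole, hdr, sizeof(hdr));
	memcpy(whole + sizeof(hdr), payload, sizeof(payload));
	uint16_t slow = csum_fold(csum_partial(whole, sizeof(whole), 0));

	printf("incremental 0x%04x, reference 0x%04x\n", fast, slow);
	return 0;
}

This is the same fold gso_make_checksum() performs below, seeded from
SKB_GSO_CB(skb)->csum.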

Below is the performance for a netperf session between an ixgbe PF and VF
on the same host but in different namespaces.  As can be seen, a
significant performance gain comes from allowing the use of Tx checksum
offload on the inner headers while computing the outer header checksum in
software:

 Recv   Send   Send                       Utilization  Service Demand
 Socket Socket Message Elapsed            Send  Recv   Send  Recv
 Size   Size   Size    Time    Throughput local remote local remote
 bytes  bytes  bytes   secs.   10^6bits/s % S   % U    us/KB us/KB

Before:
 87380  16384  16384   10.00   12844.38   9.30  -1.00  0.712 -1.000
After:
 87380  16384  16384   10.00   13216.63   6.78  -1.00  0.504 -1.000

Changes from v1:
* Dropped use of CHECKSUM_UNNECESSARY for remote checksum offload
* Left encap_hdr_csum in place, as it will likely be needed in the future for SCTP GSO
* Broke the changes out over many more patches
* Updated GRE segmentation to more closely match UDP tunnel segmentation
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a060679c f245d079
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2161,6 +2161,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
 	return skb->csum_start - skb_headroom(skb);
 }
 
+static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
+{
+	return skb->head + skb->csum_start;
+}
+
 static inline int skb_transport_offset(const struct sk_buff *skb)
 {
 	return skb_transport_header(skb) - skb->data;
@@ -3549,6 +3554,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 struct skb_gso_cb {
 	int	mac_offset;
 	int	encap_level;
+	__wsum	csum;
 	__u16	csum_start;
 };
 #define SKB_SGO_CB_OFFSET	32
@@ -3575,6 +3581,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
 	return 0;
 }
 
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+	/* Do not update partial checksums if remote checksum is enabled. */
+	if (skb->remcsum_offload)
+		return;
+
+	SKB_GSO_CB(skb)->csum = res;
+	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
 /* Compute the checksum for a gso segment. First compute the checksum value
  * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
  * then add in skb->csum (checksum from csum_start to end of packet).
@@ -3585,15 +3601,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
  */
 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
 {
-	int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
-		   skb_transport_offset(skb);
-	__wsum partial;
+	unsigned char *csum_start = skb_transport_header(skb);
+	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+	__wsum partial = SKB_GSO_CB(skb)->csum;
 
-	partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
-	skb->csum = res;
-	SKB_GSO_CB(skb)->csum_start -= plen;
+	SKB_GSO_CB(skb)->csum = res;
+	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
 
-	return csum_fold(partial);
+	return csum_fold(csum_partial(csum_start, plen, partial));
 }
 
 static inline bool skb_is_gso(const struct sk_buff *skb)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3004,8 +3004,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	if (unlikely(!proto))
 		return ERR_PTR(-EINVAL);
 
-	csum = !head_skb->encap_hdr_csum &&
-	       !!can_checksum_protocol(features, proto);
+	csum = !!can_checksum_protocol(features, proto);
 
 	headroom = skb_headroom(head_skb);
 	pos = skb_headlen(head_skb);
@@ -3098,13 +3097,15 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		if (nskb->len == len + doffset)
 			goto perform_csum_check;
 
-		if (!sg && !nskb->remcsum_offload) {
-			nskb->ip_summed = CHECKSUM_NONE;
-			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
-							    skb_put(nskb, len),
-							    len, 0);
+		if (!sg) {
+			if (!nskb->remcsum_offload)
+				nskb->ip_summed = CHECKSUM_NONE;
+			SKB_GSO_CB(nskb)->csum =
+				skb_copy_and_csum_bits(head_skb, offset,
+						       skb_put(nskb, len),
+						       len, 0);
 			SKB_GSO_CB(nskb)->csum_start =
 			    skb_headroom(nskb) + doffset;
 			continue;
 		}
 
@@ -3170,12 +3171,19 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		nskb->truesize += nskb->data_len;
 
 perform_csum_check:
-		if (!csum && !nskb->remcsum_offload) {
-			nskb->csum = skb_checksum(nskb, doffset,
-						  nskb->len - doffset, 0);
-			nskb->ip_summed = CHECKSUM_NONE;
+		if (!csum) {
+			if (skb_has_shared_frag(nskb)) {
+				err = __skb_linearize(nskb);
+				if (err)
+					goto err;
+			}
+			if (!nskb->remcsum_offload)
+				nskb->ip_summed = CHECKSUM_NONE;
+			SKB_GSO_CB(nskb)->csum =
+				skb_checksum(nskb, doffset,
+					     nskb->len - doffset, 0);
 			SKB_GSO_CB(nskb)->csum_start =
 			    skb_headroom(nskb) + doffset;
 		}
 	} while ((offset += len) < head_skb->len);
 
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -18,15 +18,13 @@
 static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 				       netdev_features_t features)
 {
+	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
-	netdev_features_t enc_features;
-	int ghl;
-	struct gre_base_hdr *greh;
 	u16 mac_offset = skb->mac_header;
-	int mac_len = skb->mac_len;
 	__be16 protocol = skb->protocol;
-	int tnl_hlen;
-	bool csum;
+	u16 mac_len = skb->mac_len;
+	int gre_offset, outer_hlen;
+	bool need_csum;
 
 	if (unlikely(skb_shinfo(skb)->gso_type &
 		     ~(SKB_GSO_TCPV4 |
@@ -43,74 +41,59 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	if (!skb->encapsulation)
 		goto out;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
+	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
 		goto out;
 
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-
-	ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
-	if (unlikely(ghl < sizeof(*greh)))
+	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
 		goto out;
 
-	csum = !!(greh->flags & GRE_CSUM);
-	if (csum)
-		skb->encap_hdr_csum = 1;
-
 	/* setup inner skb. */
-	skb->protocol = greh->protocol;
 	skb->encapsulation = 0;
-
-	if (unlikely(!pskb_may_pull(skb, ghl)))
-		goto out;
-
-	__skb_pull(skb, ghl);
+	__skb_pull(skb, tnl_hlen);
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
+	skb->protocol = skb->inner_protocol;
+
+	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
+	skb->encap_hdr_csum = need_csum;
+
+	features &= skb->dev->hw_enc_features;
 
 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & features;
-	segs = skb_mac_gso_segment(skb, enc_features);
+	segs = skb_mac_gso_segment(skb, features);
 	if (IS_ERR_OR_NULL(segs)) {
-		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
+		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+				     mac_len);
 		goto out;
 	}
 
+	outer_hlen = skb_tnl_header_len(skb);
+	gre_offset = outer_hlen - tnl_hlen;
 	skb = segs;
-	tnl_hlen = skb_tnl_header_len(skb);
 	do {
-		__skb_push(skb, ghl);
-		if (csum) {
-			__be32 *pcsum;
-
-			if (skb_has_shared_frag(skb)) {
-				int err;
-
-				err = __skb_linearize(skb);
-				if (err) {
-					kfree_skb_list(segs);
-					segs = ERR_PTR(err);
-					goto out;
-				}
-			}
-
-			skb_reset_transport_header(skb);
-			greh = (struct gre_base_hdr *)
-			    skb_transport_header(skb);
-			pcsum = (__be32 *)(greh + 1);
-			*pcsum = 0;
-			*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
-		}
-		__skb_push(skb, tnl_hlen - ghl);
+		struct gre_base_hdr *greh;
+		__be32 *pcsum;
+
 		skb_reset_inner_headers(skb);
 		skb->encapsulation = 1;
 
-		skb_reset_mac_header(skb);
-		skb_set_network_header(skb, mac_len);
 		skb->mac_len = mac_len;
 		skb->protocol = protocol;
+
+		__skb_push(skb, outer_hlen);
+		skb_reset_mac_header(skb);
+		skb_set_network_header(skb, mac_len);
+		skb_set_transport_header(skb, gre_offset);
+
+		if (!need_csum)
+			continue;
+
+		greh = (struct gre_base_hdr *)skb_transport_header(skb);
+		pcsum = (__be32 *)(greh + 1);
+
+		*pcsum = 0;
+		*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
 	} while ((skb = skb->next));
+
 out:
 	return segs;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -135,7 +135,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 		th->fin = th->psh = 0;
 		th->check = newcheck;
 
-		if (skb->ip_summed != CHECKSUM_PARTIAL)
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			gso_reset_checksum(skb, ~th->check);
+		else
 			th->check = gso_make_checksum(skb, ~th->check);
 
 		seq += mss;
@@ -169,7 +171,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				  skb->data_len);
 	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
 				(__force u32)delta));
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		gso_reset_checksum(skb, ~th->check);
+	else
 		th->check = gso_make_checksum(skb, ~th->check);
 out:
 	return segs;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -32,42 +32,56 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 					     netdev_features_t features),
 	__be16 new_protocol, bool is_ipv6)
 {
+	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	bool remcsum, need_csum, offload_csum;
+	struct udphdr *uh = udp_hdr(skb);
 	u16 mac_offset = skb->mac_header;
-	int mac_len = skb->mac_len;
-	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	__be16 protocol = skb->protocol;
-	netdev_features_t enc_features;
+	u16 mac_len = skb->mac_len;
 	int udp_offset, outer_hlen;
-	unsigned int oldlen;
-	bool need_csum = !!(skb_shinfo(skb)->gso_type &
-			    SKB_GSO_UDP_TUNNEL_CSUM);
-	bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
-	bool offload_csum = false, dont_encap = (need_csum || remcsum);
-
-	oldlen = (u16)~skb->len;
+	u32 partial;
 
 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
 		goto out;
 
+	/* adjust partial header checksum to negate old length */
+	partial = (__force u32)uh->check + (__force u16)~uh->len;
+
+	/* setup inner skb. */
 	skb->encapsulation = 0;
 	__skb_pull(skb, tnl_hlen);
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
 	skb->protocol = new_protocol;
+
+	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
 	skb->encap_hdr_csum = need_csum;
+
+	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
 	skb->remcsum_offload = remcsum;
 
 	/* Try to offload checksum if possible */
 	offload_csum = !!(need_csum &&
-			  ((skb->dev->features & NETIF_F_HW_CSUM) ||
-			   (skb->dev->features & (is_ipv6 ?
-			    NETIF_F_IPV6_CSUM : NETIF_F_IP_CSUM))));
+			  (skb->dev->features &
+			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
+				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
+
+	features &= skb->dev->hw_enc_features;
+
+	/* The only checksum offload we care about from here on out is the
+	 * outer one so strip the existing checksum feature flags and
+	 * instead set the flag based on our outer checksum offload value.
+	 */
+	if (remcsum) {
+		features &= ~NETIF_F_CSUM_MASK;
+		if (offload_csum)
+			features |= NETIF_F_HW_CSUM;
+	}
 
 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & features;
-	segs = gso_inner_segment(skb, enc_features);
+	segs = gso_inner_segment(skb, features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
 				     mac_len);
@@ -78,17 +92,13 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	udp_offset = outer_hlen - tnl_hlen;
 	skb = segs;
 	do {
-		struct udphdr *uh;
-		int len;
-		__be32 delta;
+		__be16 len;
 
-		if (dont_encap) {
-			skb->encapsulation = 0;
+		if (remcsum)
 			skb->ip_summed = CHECKSUM_NONE;
-		} else {
-			/* Only set up inner headers if we might be offloading
-			 * inner checksum.
-			 */
+
+		/* Set up inner headers if we are offloading inner checksum */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			skb_reset_inner_headers(skb);
 			skb->encapsulation = 1;
 		}
@@ -96,43 +106,28 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 		skb->mac_len = mac_len;
 		skb->protocol = protocol;
 
-		skb_push(skb, outer_hlen);
+		__skb_push(skb, outer_hlen);
 		skb_reset_mac_header(skb);
 		skb_set_network_header(skb, mac_len);
 		skb_set_transport_header(skb, udp_offset);
-		len = skb->len - udp_offset;
+		len = htons(skb->len - udp_offset);
 		uh = udp_hdr(skb);
-		uh->len = htons(len);
+		uh->len = len;
 
 		if (!need_csum)
 			continue;
 
-		delta = htonl(oldlen + len);
-
 		uh->check = ~csum_fold((__force __wsum)
-				       ((__force u32)uh->check +
-					(__force u32)delta));
-		if (offload_csum) {
-			skb->ip_summed = CHECKSUM_PARTIAL;
-			skb->csum_start = skb_transport_header(skb) - skb->head;
-			skb->csum_offset = offsetof(struct udphdr, check);
-		} else if (remcsum) {
-			/* Need to calculate checksum from scratch,
-			 * inner checksums are never when doing
-			 * remote_checksum_offload.
-			 */
-			skb->csum = skb_checksum(skb, udp_offset,
-						 skb->len - udp_offset,
-						 0);
-			uh->check = csum_fold(skb->csum);
-			if (uh->check == 0)
-				uh->check = CSUM_MANGLED_0;
-		} else {
-			uh->check = gso_make_checksum(skb, ~uh->check);
+				       ((__force u32)len + partial));
 
+		if (skb->encapsulation || !offload_csum) {
+			uh->check = gso_make_checksum(skb, ~uh->check);
 			if (uh->check == 0)
 				uh->check = CSUM_MANGLED_0;
+		} else {
+			skb->ip_summed = CHECKSUM_PARTIAL;
+			skb->csum_start = skb_transport_header(skb) - skb->head;
+			skb->csum_offset = offsetof(struct udphdr, check);
 		}
 	} while ((skb = skb->next));
 
 out: