Commit f3591fd4 authored by David S. Miller

Merge branch 'inet_csums'

Tom Herbert says:

====================
net: Checksum offload changes - Part IV

I am working on overhauling RX checksum offload. Goals of this effort
are:

- Specify exactly what it means when a driver returns CHECKSUM_UNNECESSARY
- Preserve CHECKSUM_COMPLETE through encapsulation layers
- Don't do skb_checksum more than once per packet
- Unify GRO and non-GRO csum verification as much as possible
- Unify the checksum functions (checksum_init)
- Simplify code

What is in this fourth patch set:

- Preserve CHECKSUM_COMPLETE instead of changing it to
  CHECKSUM_UNNECESSARY. This allows correct reuse when validating multiple
  csums in a packet (see the stand-alone sketch after this list).
- When SW needs to compute the packet checksum, save it as
  CHECKSUM_COMPLETE. Also mark that the checksum was computed by SW.
- Add skb_gro_postpull_rcsum to udp and vxlan to make GRO work with
  CHECKSUM_COMPLETE.
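
To illustrate the first bullet with a stand-alone sketch (plain user-space C,
not kernel code): the one's-complement sum used by IP checksums is additive
over 16-bit-aligned pieces, so a single CHECKSUM_COMPLETE value covering the
whole packet can be reused for an inner checksum by subtracting the sum of
the bytes already consumed. The helper names below (csum16, csum16_sub) are
invented for the demo; the kernel primitives doing the equivalent work are
csum_partial, csum_sub and csum_fold.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum of a byte buffer (RFC 1071 style). */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement subtraction: a - b == a + ~b, with end-around carry. */
static uint16_t csum16_sub(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + (uint16_t)~b;

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[64];
	const size_t hdr = 8;	/* pretend the first 8 bytes are an outer header */
	size_t i;

	for (i = 0; i < sizeof(pkt); i++)
		pkt[i] = (uint8_t)(i * 37 + 11);

	uint16_t whole  = csum16(pkt, sizeof(pkt));
	uint16_t header = csum16(pkt, hdr);
	uint16_t rest   = csum16(pkt + hdr, sizeof(pkt) - hdr);

	/* Compare modulo 0xffff: 0x0000 and 0xffff denote the same value in
	 * one's-complement arithmetic. */
	assert(csum16_sub(whole, header) % 0xffff == rest % 0xffff);
	printf("whole=%04x header=%04x rest=%04x\n", whole, header, rest);
	return 0;
}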

v2: Removed patch setting skb_encapsulation when validating checksum
    in tcp_gro_receive

Please review carefully and test if possible; mucking with basic
checksum functions is always a little precarious :-)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1054cc15 6bae1d4c
@@ -565,6 +565,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
 		goto out;
 	}
 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
 
 	off_eth = skb_gro_offset(skb);
 	hlen = off_eth + sizeof(*eh);
@@ -599,6 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
 	}
 
 	skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
+	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
 	pp = ptype->callbacks.gro_receive(head, skb);
 
 out_unlock:
...
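
For context, skb_gro_postpull_rcsum() is a small helper in
include/linux/netdevice.h. Roughly, and treating this as a paraphrase rather
than the exact source, it discounts the header that was just pulled from the
running CHECKSUM_COMPLETE value, so the value keeps matching the data that
GRO still has in front of it:

#include <linux/netdevice.h>
#include <net/checksum.h>

/* Paraphrase of skb_gro_postpull_rcsum(): once skb_gro_pull() has advanced
 * past a header, remove that header's contribution from the running
 * CHECKSUM_COMPLETE value so it covers only the remaining data.
 */
static inline void gro_postpull_rcsum_sketch(struct sk_buff *skb,
					     const void *start,
					     unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
						  csum_partial(start, len, 0));
}

Without this step the vxlan and inner Ethernet headers pulled above would
still be counted in NAPI_GRO_CB(skb)->csum, and any later validation against
the inner headers would see a stale value.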
@@ -572,7 +572,9 @@ struct sk_buff {
 	 */
 	__u8			encapsulation:1;
 	__u8			encap_hdr_csum:1;
-	/* 5/7 bit hole (depending on ndisc_nodetype presence) */
+	__u8			csum_valid:1;
+	__u8			csum_complete_sw:1;
+	/* 3/5 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -2735,7 +2737,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
-	return skb->ip_summed & CHECKSUM_UNNECESSARY;
+	return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid);
 }
 
 /**
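
Because csum_valid is now honoured by skb_csum_unnecessary(), callers that
gate a software verification on it also benefit from an earlier successful
validation. For reference, the existing skb_checksum_complete() wrapper in
the same header is roughly the following (paraphrased):

/* Skip the full software checksum walk when the packet is already known to
 * be good; with this series that now includes the csum_valid case.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}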
@@ -2769,10 +2771,8 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
 						  bool zero_okay,
 						  __sum16 check)
 {
-	if (skb_csum_unnecessary(skb)) {
-		return false;
-	} else if (zero_okay && !check) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
+		skb->csum_valid = 1;
 		return false;
 	}
@@ -2799,15 +2799,20 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		if (!csum_fold(csum_add(psum, skb->csum))) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_valid = 1;
 			return 0;
 		}
 	}
 
 	skb->csum = psum;
 
-	if (complete || skb->len <= CHECKSUM_BREAK)
-		return __skb_checksum_complete(skb);
+	if (complete || skb->len <= CHECKSUM_BREAK) {
+		__sum16 csum;
+
+		csum = __skb_checksum_complete(skb);
+		skb->csum_valid = !csum;
+		return csum;
+	}
 
 	return 0;
 }
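
A quick stand-alone reminder of why "!csum_fold(...)" above means the
checksum verified, and hence why csum_valid is set from !csum: the sender
stores the complement of the one's-complement sum, so summing the received
bytes including that field yields all-ones, and folding plus complementing
yields zero. csum16() below is the same toy accumulator as in the sketch
near the top of this message; the real kernel primitives are csum_partial(),
csum_add() and csum_fold().

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t seg[32];
	size_t i;

	for (i = 0; i < sizeof(seg); i++)
		seg[i] = (uint8_t)(i * 29 + 3);

	/* Sender: zero the checksum field (bytes 6-7 in this toy layout),
	 * then store the complement of the sum there. */
	seg[6] = seg[7] = 0;
	uint16_t csum = (uint16_t)~csum16(seg, sizeof(seg));
	seg[6] = csum >> 8;
	seg[7] = csum & 0xff;

	/* Receiver: sum everything, checksum field included; the complement
	 * of the folded sum must be zero (cf. !csum_fold(csum_add(...))). */
	uint16_t verify = (uint16_t)~csum16(seg, sizeof(seg));
	assert(verify == 0);
	printf("stored=%04x verify=%04x\n", csum, verify);
	return 0;
}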
@@ -2831,6 +2836,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
 						 zero_okay, check, compute_pseudo)	\
 ({										\
 	__sum16 __ret = 0;							\
+	skb->csum_valid = 0;							\
 	if (__skb_checksum_validate_needed(skb, zero_okay, check))		\
 		__ret = __skb_checksum_validate_complete(skb,			\
 				complete, compute_pseudo(skb, proto));		\
...
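
The wrapper macro patched just above (its full name is cut off in this view)
ties the helpers together. Hand-expanded into a function it is roughly the
following; compute_pseudo stands for the protocol's pseudo-header helper, and
this is an illustration only, since the real code stays a macro so the pseudo
computation can be passed in and inlined:

#include <linux/skbuff.h>

/* Hand-expanded sketch of the validation wrapper: reset csum_valid, decide
 * whether any work is needed (a zero UDP checksum may be acceptable), then
 * verify against the pseudo-header sum.
 */
static inline __sum16 checksum_validate_sketch(struct sk_buff *skb,
					       bool complete, bool zero_okay,
					       __sum16 check, __wsum pseudo)
{
	__sum16 ret = 0;

	skb->csum_valid = 0;
	if (__skb_checksum_validate_needed(skb, zero_okay, check))
		ret = __skb_checksum_validate_complete(skb, complete, pseudo);

	return ret;
}

When the checksum is actually verified by this call, skb->csum_valid ends up
set, so a later skb_csum_unnecessary() on the same skb is free.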
@@ -739,11 +739,15 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 	__sum16 sum;
 
 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
-	if (likely(!sum)) {
-		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
-			netdev_rx_csum_fault(skb->dev);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	}
+	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
+	    !skb->csum_complete_sw)
+		netdev_rx_csum_fault(skb->dev);
+
+	/* Save checksum complete for later use */
+	skb->csum = sum;
+	skb->ip_summed = CHECKSUM_COMPLETE;
+	skb->csum_complete_sw = 1;
+
 	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
...
@@ -131,10 +131,12 @@ static __sum16 gro_skb_checksum(struct sk_buff *skb)
 			csum_partial(skb->data, skb_gro_offset(skb), 0));
 	sum = csum_fold(NAPI_GRO_CB(skb)->csum);
 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
-		if (unlikely(!sum))
+		if (unlikely(!sum) && !skb->csum_complete_sw)
 			netdev_rx_csum_fault(skb->dev);
-	} else
+	} else {
 		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum_complete_sw = 1;
+	}
 
 	return sum;
 }
...
@@ -200,6 +200,7 @@ static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *s
 	}
 
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
+	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	pp = uo_priv->offload->callbacks.gro_receive(head, skb);
 
 out_unlock:
...
@@ -173,7 +173,8 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 		return -1;
 	if (csum_fold(desc.csum))
 		return -1;
-	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
+	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+	    !skb->csum_complete_sw)
 		netdev_rx_csum_fault(skb->dev);
 	return 0;
 no_checksum:
...