Commit 662880f4 authored by Tom Herbert, committed by David S. Miller

net: Allow GRO to use and set levels of checksum unnecessary

Allow the GRO path to "consume" checksums provided in CHECKSUM_UNNECESSARY
and to report newly verified checksums for use in the fallback to the
normal path.

Change the GRO checksum path to track csum_level using a csum_cnt field
in NAPI_GRO_CB. On GRO initialization, if ip_summed is
CHECKSUM_UNNECESSARY, set NAPI_GRO_CB(skb)->csum_cnt to
skb->csum_level + 1. For each checksum verified, decrement
NAPI_GRO_CB(skb)->csum_cnt while it is greater than zero. If a checksum
is verified and NAPI_GRO_CB(skb)->csum_cnt == 0, we have verified a
deeper checksum than originally indicated in the skbuff, so increment
csum_level (or initialize to CHECKSUM_UNNECESSARY if ip_summed is
CHECKSUM_NONE or CHECKSUM_COMPLETE).
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 77cffe23
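The following is a minimal standalone sketch of the bookkeeping described in the commit message, written against simplified stand-in types: the struct below is not the kernel's sk_buff or NAPI_GRO_CB, and the helper names (gro_init_csum, gro_checksum_verified) are illustrative only, corresponding roughly to the dev_gro_receive() setup and skb_gro_incr_csum_unnecessary() in the diff that follows.

/*
 * Toy model of the csum_cnt logic. Only the fields relevant to this
 * commit are represented; names marked "stands in for" are hypothetical.
 */
#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY, CHECKSUM_COMPLETE, CHECKSUM_PARTIAL };

struct toy_skb {
	int ip_summed;
	unsigned int csum_level; /* checksums already validated by the device */
	unsigned int csum_cnt;   /* stands in for NAPI_GRO_CB(skb)->csum_cnt */
};

/* GRO init: seed csum_cnt from what the device reported */
static void gro_init_csum(struct toy_skb *skb)
{
	skb->csum_cnt = (skb->ip_summed == CHECKSUM_UNNECESSARY) ?
			skb->csum_level + 1 : 0;
}

/* Called once for each checksum GRO verifies */
static void gro_checksum_verified(struct toy_skb *skb)
{
	if (skb->csum_cnt > 0) {
		/* Consume one of the checksums the device already covered */
		skb->csum_cnt--;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		/* Verified a deeper checksum than originally indicated */
		skb->csum_level++;
	} else {
		/* CHECKSUM_NONE/COMPLETE: record the first verified checksum */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

int main(void)
{
	/* Device validated only the outer checksum (csum_level == 0) */
	struct toy_skb skb = { CHECKSUM_UNNECESSARY, 0, 0 };

	gro_init_csum(&skb);         /* csum_cnt = csum_level + 1 = 1 */
	gro_checksum_verified(&skb); /* outer checksum consumed, csum_cnt -> 0 */
	gro_checksum_verified(&skb); /* inner checksum verified, csum_level -> 1 */

	printf("ip_summed=%d csum_level=%u csum_cnt=%u\n",
	       skb.ip_summed, skb.csum_level, skb.csum_cnt);
	return 0;
}

Run to completion, this ends with csum_level == 1 and csum_cnt == 0, i.e. the "verified a deeper checksum than originally indicated" case from the commit message, which is exactly what a later fallback to the normal receive path gets to reuse.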
@@ -1883,8 +1883,8 @@ struct napi_gro_cb {
 	/* GRO checksum is valid */
 	u8	csum_valid:1;
-	/* Number encapsulation layers crossed */
-	u8	encapsulation;
+	/* Number of checksums via CHECKSUM_UNNECESSARY */
+	u8	csum_cnt:3;
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2179,8 +2179,7 @@ static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
 						      __sum16 check)
 {
 	return (skb->ip_summed != CHECKSUM_PARTIAL &&
-		(skb->ip_summed != CHECKSUM_UNNECESSARY ||
-		 (NAPI_GRO_CB(skb)->encapsulation > skb->encapsulation)) &&
+		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 		(!zero_okay || check));
 }
@@ -2196,18 +2195,17 @@ static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
 	return __skb_gro_checksum_complete(skb);
 }
-/* Update skb for CHECKSUM_UNNECESSARY when we verified a top level
- * checksum or an encapsulated one during GRO. This saves work
- * if we fallback to normal path with the packet.
- */
 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
 {
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (NAPI_GRO_CB(skb)->encapsulation)
-			skb->encapsulation = 1;
-	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		skb->encapsulation = 0;
+	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+		/* Consume a checksum from CHECKSUM_UNNECESSARY */
+		NAPI_GRO_CB(skb)->csum_cnt--;
+	} else {
+		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+		 * verified a new top level checksum or an encapsulated one
+		 * during GRO. This saves work if we fallback to normal path.
+		 */
+		__skb_incr_checksum_unnecessary(skb);
 	}
 }
@@ -3962,13 +3962,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	gro_list_prepare(napi, skb);
-	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		NAPI_GRO_CB(skb)->csum = skb->csum;
-		NAPI_GRO_CB(skb)->csum_valid = 1;
-	} else {
-		NAPI_GRO_CB(skb)->csum_valid = 0;
-	}
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		if (ptype->type != type || !ptype->callbacks.gro_receive)
@@ -3980,7 +3973,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->udp_mark = 0;
-		NAPI_GRO_CB(skb)->encapsulation = 0;
+
+		/* Setup for GRO checksum validation */
+		switch (skb->ip_summed) {
+		case CHECKSUM_COMPLETE:
+			NAPI_GRO_CB(skb)->csum = skb->csum;
+			NAPI_GRO_CB(skb)->csum_valid = 1;
+			NAPI_GRO_CB(skb)->csum_cnt = 0;
+			break;
+		case CHECKSUM_UNNECESSARY:
+			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+			NAPI_GRO_CB(skb)->csum_valid = 0;
+			break;
+		default:
+			NAPI_GRO_CB(skb)->csum_cnt = 0;
+			NAPI_GRO_CB(skb)->csum_valid = 0;
+		}
 		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
 		break;
@@ -172,12 +172,9 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	}
 	/* Don't bother verifying checksum if we're going to flush anyway. */
-	if (greh->flags & GRE_CSUM) {
-		if (!NAPI_GRO_CB(skb)->flush &&
-		    skb_gro_checksum_simple_validate(skb))
+	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush &&
+	    skb_gro_checksum_simple_validate(skb))
 			goto out_unlock;
-		NAPI_GRO_CB(skb)->encapsulation++;
-	}
 	flush = 0;
@@ -238,12 +238,13 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 	int flush = 1;
 	if (NAPI_GRO_CB(skb)->udp_mark ||
-	    (!skb->encapsulation && !NAPI_GRO_CB(skb)->csum_valid))
+	    (skb->ip_summed != CHECKSUM_PARTIAL &&
+	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+	     !NAPI_GRO_CB(skb)->csum_valid))
 		goto out;
 	/* mark that this skb passed once through the udp gro layer */
 	NAPI_GRO_CB(skb)->udp_mark = 1;
-	NAPI_GRO_CB(skb)->encapsulation++;
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);