Commit f22f5f60 authored by David S. Miller

Merge branch 'net-minor-cleanups-and-optimizations'

Alexander Duyck says:

====================
A couple of minor clean-ups and optimizations

This patch series is basically just a v2 of a couple patches I recently
submitted.

The two patches aren't technically related, but they are just items I found
while cleaning up and prepping some further work to enable Tx checksums for
tunnels.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3a8befcd 33803963
@@ -463,8 +463,6 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 			goto out;
 	}
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -481,14 +479,13 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
 	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
-	if (!ptype) {
-		flush = 1;
+	if (!ptype)
 		goto out_unlock;
-	}
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
 	pp = ptype->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();
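The geneve hunks above show the whole pattern: the flush bit is now left set until the inner gro_receive callback has actually been reached, so the early exits can simply jump to out/out_unlock without having to restore it. The same two-line change is repeated for vxlan, gue and gre below. As a rough illustration only (plain userspace C with stand-in names, not the kernel code), the control flow changes like this:

/* Standalone sketch of the "defer clearing the flush bit" change.
 * Stand-in types and names; this only models the control flow. */
#include <stdbool.h>
#include <stdio.h>

static int tunnel_gro_receive_old(bool have_ptype)
{
	int flush = 1;

	flush = 0;		/* cleared before the outcome is known */
	if (!have_ptype) {
		flush = 1;	/* every bail-out has to set it back */
		goto out;
	}
	/* ...inner protocol gro_receive() would run here... */
out:
	return flush;
}

static int tunnel_gro_receive_new(bool have_ptype)
{
	int flush = 1;		/* pessimistic default */

	if (!have_ptype)
		goto out;	/* error paths need no cleanup */
	/* ...inner protocol gro_receive() would run here... */
	flush = 0;		/* cleared only on the success path */
out:
	return flush;
}

int main(void)
{
	printf("missing ptype: old=%d new=%d\n",
	       tunnel_gro_receive_old(false), tunnel_gro_receive_new(false));
	printf("normal path:   old=%d new=%d\n",
	       tunnel_gro_receive_old(true), tunnel_gro_receive_new(true));
	return 0;
}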
@@ -591,8 +591,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -606,6 +604,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 	}
 
 	pp = eth_gro_receive(head, skb);
+	flush = 0;
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
@@ -88,8 +88,11 @@ static inline __wsum
 csum_block_add(__wsum csum, __wsum csum2, int offset)
 {
 	u32 sum = (__force u32)csum2;
-	if (offset&1)
-		sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
+
+	/* rotate sum to align it with a 16b boundary */
+	if (offset & 1)
+		sum = ror32(sum, 8);
+
 	return csum_add(csum, (__force __wsum)sum);
 }
 
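The rotate is not bit-for-bit identical to the old swap of the bytes within each 16-bit half, but the two results differ by a multiple of 0xffff, so they fold to the same 16-bit ones'-complement checksum. A standalone sanity check (plain userspace C; the demo_* helpers are made up for this sketch and only model the kernel's ror32() and checksum folding):

#include <stdint.h>
#include <stdio.h>

/* demo-only rotate; models ror32() from <linux/bitops.h> */
static uint32_t demo_ror32(uint32_t v, unsigned int n)
{
	return (v >> n) | (v << (32 - n));
}

/* fold a 32-bit ones'-complement accumulator down to 16 bits */
static uint16_t demo_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t sum = 0xaabbccddu;	/* arbitrary partial checksum */

	uint32_t swapped = ((sum & 0xFF00FF) << 8) + ((sum >> 8) & 0xFF00FF);
	uint32_t rotated = demo_ror32(sum, 8);

	/* both print 0x9977: same folded checksum, but a single rotate */
	printf("old byteswap folds to 0x%04x\n", demo_fold(swapped));
	printf("ror32(sum, 8) folds to 0x%04x\n", demo_fold(rotated));
	return 0;
}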
@@ -102,10 +105,7 @@ csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
 static inline __wsum
 csum_block_sub(__wsum csum, __wsum csum2, int offset)
 {
-	u32 sum = (__force u32)csum2;
-	if (offset&1)
-		sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
-	return csum_sub(csum, (__force __wsum)sum);
+	return csum_block_add(csum, ~csum2, offset);
 }
 
 static inline __wsum csum_unfold(__sum16 n)
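csum_block_sub() can be reduced to csum_block_add() because, in ones'-complement arithmetic, subtracting a value is the same as adding its bitwise complement, and complementing commutes with the rotate used for odd offsets. A standalone round-trip check (plain userspace C; the demo_* helpers only model csum_add()/csum_fold() and are not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* 32-bit ones'-complement add with end-around carry (models csum_add) */
static uint32_t demo_oc_add(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;
	return (uint32_t)((s & 0xffffffffu) + (s >> 32));
}

static uint16_t demo_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t csum  = 0x12345678u;	/* arbitrary running checksum */
	uint32_t block = 0x0000beefu;	/* checksum of a block to add/remove */

	uint32_t with_block = demo_oc_add(csum, block);
	/* removing the block by adding its complement restores the checksum */
	uint32_t removed = demo_oc_add(with_block, ~block);

	printf("original folds to 0x%04x\n", demo_fold(csum));
	printf("add+sub  folds to 0x%04x\n", demo_fold(removed));
	return 0;
}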
@@ -319,8 +319,6 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, hdrlen);
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		const struct guehdr *guehdr2;
 
@@ -352,6 +350,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 		goto out_unlock;
 
 	pp = ops->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();
@@ -175,8 +175,6 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 					     null_compute_pseudo);
 	}
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		const struct gre_base_hdr *greh2;
 
@@ -213,6 +211,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
 	pp = ptype->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();