Commit eae3f88e authored by David S. Miller

net: Separate out SKB validation logic from transmit path.

dev_hard_start_xmit() does two things: it first validates and
canonicalizes the SKB, then it actually sends it.

Make a set of helper functions for doing the first part.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95f6b3dd
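
For orientation, a minimal sketch of the control flow this patch ends up with, using the helper names from the diff below (bodies elided; this is a reading aid, not the kernel source):

	/* Step 1: validate/canonicalize. The helper returns a ready-to-send
	 * skb (possibly re-allocated), or frees it and returns NULL.
	 */
	skb = validate_xmit_skb(skb, dev);
	if (!skb)
		return rc;	/* rc == NETDEV_TX_OK; skb already freed */

	/* Step 2: actually transmit; single-skb fast path, GSO list otherwise. */
	if (likely(!skb->next))
		return xmit_one(skb, dev, txq, false);
	skb->next = xmit_list(skb->next, dev, txq, &rc);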
net/core/dev.c
@@ -2644,32 +2644,35 @@ static struct sk_buff *xmit_list(struct sk_buff *first, struct net_device *dev,
 	return skb;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
+struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
 {
-	int rc = NETDEV_TX_OK;
+	if (vlan_tx_tag_present(skb) &&
+	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+		skb = __vlan_put_tag(skb, skb->vlan_proto,
+				     vlan_tx_tag_get(skb));
+		if (skb)
+			skb->vlan_tci = 0;
+	}
+	return skb;
+}
 
-	if (likely(!skb->next)) {
-		netdev_features_t features;
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_features_t features;
 
-		/*
-		 * If device doesn't need skb->dst, release it right now while
-		 * its hot in this cpu cache
-		 */
-		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-			skb_dst_drop(skb);
+	if (skb->next)
+		return skb;
 
-		features = netif_skb_features(skb);
+	/* If device doesn't need skb->dst, release it right now while
+	 * its hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
 
-		if (vlan_tx_tag_present(skb) &&
-		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
-			skb = __vlan_put_tag(skb, skb->vlan_proto,
-					     vlan_tx_tag_get(skb));
-			if (unlikely(!skb))
-				goto out;
+	features = netif_skb_features(skb);
+	skb = validate_xmit_vlan(skb, features);
+	if (unlikely(!skb))
+		goto out_null;
 
-			skb->vlan_tci = 0;
-		}
-
-		/* If encapsulation offload request, verify we are testing
-		 * hardware encapsulation features instead of standard
+	/* If encapsulation offload request, verify we are testing
+	 * hardware encapsulation features instead of standard
@@ -2681,8 +2684,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-		if (netif_needs_gso(skb, features)) {
-			if (unlikely(dev_gso_segment(skb, features)))
-				goto out_kfree_skb;
-			if (skb->next)
-				goto gso;
-		} else {
-			if (skb_needs_linearize(skb, features) &&
-			    __skb_linearize(skb))
+	if (netif_needs_gso(skb, features)) {
+		if (unlikely(dev_gso_segment(skb, features)))
+			goto out_kfree_skb;
+	} else {
+		if (skb_needs_linearize(skb, features) &&
+		    __skb_linearize(skb))
@@ -2705,19 +2706,35 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			}
-		}
+		}
+	}
 
-		return xmit_one(skb, dev, txq, false);
-	}
+	return skb;
 
-gso:
+out_kfree_skb:
+	kfree_skb(skb);
+out_null:
+	return NULL;
+}
+
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
+{
+	int rc = NETDEV_TX_OK;
+
+	skb = validate_xmit_skb(skb, dev);
+	if (!skb)
+		return rc;
+
+	if (likely(!skb->next))
+		return xmit_one(skb, dev, txq, false);
+
 	skb->next = xmit_list(skb->next, dev, txq, &rc);
 	if (likely(skb->next == NULL)) {
 		skb->destructor = DEV_GSO_CB(skb)->destructor;
 		consume_skb(skb);
 		return rc;
 	}
 
-out_kfree_skb:
 	kfree_skb(skb);
-out:
+
 	return rc;
 }
 EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
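
A note on the ownership convention the new helpers establish: validate_xmit_skb() either hands back a usable skb (possibly a different pointer, since __vlan_put_tag() and segmentation can re-allocate) or consumes the skb and returns NULL, so the caller may only use the returned pointer. Below is a self-contained userspace analog of that contract; the buf/validate/xmit_one names are hypothetical stand-ins for the kernel types, not kernel code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical stand-in for struct sk_buff. */
	struct buf {
		char *data;
		size_t len;
	};

	/* Analog of validate_xmit_skb(): return a ready-to-send buffer
	 * (possibly re-allocated), or free it and return NULL on failure.
	 * The caller must not touch the pointer it passed in afterwards. */
	static struct buf *validate(struct buf *b)
	{
		if (b->len == 0) {
			free(b->data);	/* consume on failure, like kfree_skb() */
			free(b);
			return NULL;
		}
		return b;
	}

	/* Analog of xmit_one(): send and release the buffer. */
	static void xmit_one(struct buf *b)
	{
		printf("sent %zu bytes\n", b->len);
		free(b->data);
		free(b);
	}

	int main(void)
	{
		struct buf *b = malloc(sizeof(*b));

		b->data = strdup("hello");
		b->len = strlen(b->data);

		b = validate(b);	/* mirrors: skb = validate_xmit_skb(skb, dev); */
		if (!b)
			return 0;	/* already freed on failure */
		xmit_one(b);
		return 0;
	}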