Commit a0265d28 authored by Herbert Xu, committed by David S. Miller

net: Add __dev_forward_skb

This patch adds the helper __dev_forward_skb, which is identical to
dev_forward_skb except that it doesn't actually inject the skb into
the stack.  This is useful when we want finer control over how the
packet is injected, e.g., via netif_rx_ni or netif_receive_skb.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1536e285
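
For illustration only (not part of this commit): a caller that wants the packet
prepared for a device but injected synchronously from process context could pair
__dev_forward_skb() with netif_rx_ni(). The wrapper name my_forward_skb_ni below
is hypothetical; only __dev_forward_skb() and netif_rx_ni() come from the kernel.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper, not from this patch. */
static int my_forward_skb_ni(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	/* Validate, scrub and retag the skb for dev, but do not inject it. */
	ret = __dev_forward_skb(dev, skb);
	if (ret)
		return ret;	/* skb already freed and rx_dropped bumped */

	/* Inject from process context instead of the default receive path. */
	return netif_rx_ni(skb);
}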
...
@@ -2633,6 +2633,7 @@ int dev_get_phys_port_id(struct net_device *dev,
 					struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq);
+int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
...
...
@@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
+int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+			atomic_long_inc(&dev->rx_dropped);
+			kfree_skb(skb);
+			return NET_RX_DROP;
+		}
+	}
+
+	if (unlikely(!is_skb_forwardable(dev, skb))) {
+		atomic_long_inc(&dev->rx_dropped);
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	skb_scrub_packet(skb, true);
+	skb->protocol = eth_type_trans(skb, dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__dev_forward_skb);
+
 /**
  * dev_forward_skb - loopback an skb to another netif
  *
@@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-			atomic_long_inc(&dev->rx_dropped);
-			kfree_skb(skb);
-			return NET_RX_DROP;
-		}
-	}
-
-	if (unlikely(!is_skb_forwardable(dev, skb))) {
-		atomic_long_inc(&dev->rx_dropped);
-		kfree_skb(skb);
-		return NET_RX_DROP;
-	}
-
-	skb_scrub_packet(skb, true);
-	skb->protocol = eth_type_trans(skb, dev);
-
-	return netif_rx_internal(skb);
+	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
...
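
A side note on the rewritten dev_forward_skb(): the "expr ?: alt" form is GCC's
conditional expression with an omitted middle operand; it evaluates expr once and
yields it if non-zero, otherwise alt. A longhand equivalent of the new body,
written out purely for illustration, would be:

int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int err = __dev_forward_skb(dev, skb);

	/* On error the skb has already been freed; propagate NET_RX_DROP. */
	if (err)
		return err;

	/* Success: hand the scrubbed, retagged skb to the receive path. */
	return netif_rx_internal(skb);
}

Since netif_rx_internal() is static to net/core/dev.c, this spelling is only
meaningful inside that file, which is also where dev_forward_skb() lives.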