Commit b6bacd55 authored by Eric W. Biederman, committed by David S. Miller

netpoll: Don't drop all received packets.

Change the strategy of netpoll from dropping all packets received
during netpoll_poll_dev to calling napi poll with a budget of 0
(to avoid processing the driver's rx queue) and ignoring packets received
with netif_rx (those are safely placed on the backlog queue).
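To make the budget convention concrete, here is a minimal, hypothetical NAPI poll
routine (foo_napi_poll and the foo_* ring helpers are invented for this sketch and
are not part of the commit): because the rx loop is bounded by the budget, a poll
with a budget of 0 reaps tx completions but never pulls a packet off the rx ring.

/* Hypothetical driver, for illustration only -- not from this commit. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	struct napi_struct napi;
	struct net_device *ndev;
};

/* Fictional hardware helpers a real driver would implement. */
bool foo_rx_ring_has_work(struct foo_priv *priv);
struct sk_buff *foo_rx_ring_pop(struct foo_priv *priv);
void foo_clean_tx_ring(struct foo_priv *priv);

static int foo_napi_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = 0;

	/* Tx completion work is not bounded by the budget. */
	foo_clean_tx_ring(priv);

	/* Rx is strictly budget-bounded: with budget == 0 (the netpoll
	 * case after this commit) the loop body never runs, so the rx
	 * ring is left untouched.
	 */
	while (work_done < budget && foo_rx_ring_has_work(priv)) {
		struct sk_buff *skb = foo_rx_ring_pop(priv);

		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Standard convention: only complete when the budget was not
	 * exhausted, so a budget of 0 never completes here either.
	 */
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}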

All of the netpoll-supporting drivers have been reviewed to ensure
that they either use netif_rx or that their napi poll routine supports
a budget of 0 and will not process the driver's rx queues with that
budget.
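The other acceptable pattern is the legacy, non-NAPI rx path in which the driver
hands every frame to the stack with netif_rx(). A hypothetical example (the bar_*
helpers are invented, and keeping the mmio base in dev->ml_priv is only an
assumption made for the sketch):

/* Hypothetical non-NAPI rx interrupt handler, for illustration only. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>

/* Fictional hardware accessors a real driver would provide. */
bool bar_hw_frame_pending(void *ioaddr);
int bar_hw_copy_frame(void *ioaddr, struct sk_buff *skb, int maxlen);

static irqreturn_t bar_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	void *ioaddr = dev->ml_priv;	/* assumption: mmio base stashed here */

	while (bar_hw_frame_pending(ioaddr)) {
		struct sk_buff *skb = netdev_alloc_skb(dev, 1536);
		int len;

		if (!skb)
			break;
		len = bar_hw_copy_frame(ioaddr, skb, 1536);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);

		/* netif_rx() only queues the skb on the per-CPU backlog and
		 * schedules backlog processing; nothing is handled in this
		 * context, so frames arriving while netpoll_poll_dev() runs
		 * are deferred rather than dropped.
		 */
		netif_rx(skb);
	}
	return IRQ_HANDLED;
}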

Not dropping packets makes NETPOLL_RX_DROP unnecessary, so it is removed.

npinfo->rx_flags is removed as well, since with only the NETPOLL_RX_ENABLED
flag left it becomes a redundant mirror of list_empty(&npinfo->rx_np).
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ff607631
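The redundancy described above is easiest to see next to the helper that the new
code tests everywhere rx_flags used to be checked. Inferred from the commit message
(a sketch, not code quoted from the tree), netpoll_rx_processing() amounts to:

/* Sketch inferred from the commit message: rx processing is wanted iff
 * at least one netpoll has registered an rx_skb_hook, i.e. rx_np is
 * non-empty, so an rx_flags mirror of the same condition adds nothing.
 */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}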
include/linux/netpoll.h

@@ -39,7 +39,6 @@ struct netpoll {
 struct netpoll_info {
 	atomic_t refcnt;
-	unsigned long rx_flags;
 	spinlock_t rx_lock;
 	struct semaphore dev_lock;
 	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
@@ -99,7 +98,7 @@ static inline bool netpoll_rx_on(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
-	return npinfo && (netpoll_rx_processing(npinfo) || npinfo->rx_flags);
+	return npinfo && netpoll_rx_processing(npinfo);
 }
 static inline bool netpoll_rx(struct sk_buff *skb)
...
net/core/netpoll.c

@@ -51,8 +51,6 @@ static atomic_t trapped;
 DEFINE_STATIC_SRCU(netpoll_srcu);
 #define USEC_PER_POLL 50
-#define NETPOLL_RX_ENABLED 1
-#define NETPOLL_RX_DROP 2
 #define MAX_SKB_SIZE \
 	(sizeof(struct ethhdr) + \
@@ -193,7 +191,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
-	int budget = 16;
+	bool rx_processing = netpoll_rx_processing(ni);
+	int budget = rx_processing? 16 : 0;
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
@@ -207,8 +206,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 		return;
 	}
-	ni->rx_flags |= NETPOLL_RX_DROP;
-	atomic_inc(&trapped);
+	if (rx_processing)
+		atomic_inc(&trapped);
 	ops = dev->netdev_ops;
 	if (!ops->ndo_poll_controller) {
@@ -221,8 +220,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 	poll_napi(dev, budget);
-	atomic_dec(&trapped);
-	ni->rx_flags &= ~NETPOLL_RX_DROP;
+	if (rx_processing)
+		atomic_dec(&trapped);
 	up(&ni->dev_lock);
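For context on how the single budget value above reaches the hardware,
poll_napi(dev, budget) hands the per-device budget to each of the device's napi
contexts in turn. A simplified, illustrative sketch of that pattern (poll_one and
poll_all are invented names, not the kernel's poll_napi()):

/* Illustrative only: spreading one rx budget across a device's napi
 * instances.  Each napi->poll() may consume at most the budget it is
 * given and returns how much work it did.
 */
#include <linux/netdevice.h>

static int poll_one(struct napi_struct *napi, int budget)
{
	int work = napi->poll(napi, budget);	/* 0 <= work <= budget */

	return budget - work;
}

static void poll_all(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	/* With budget == 0 every poll call is an rx no-op, so netpoll
	 * only reaps tx completions while rx packets stay queued.
	 */
	list_for_each_entry(napi, &dev->napi_list, dev_list)
		budget = poll_one(napi, budget);
}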
@@ -1050,7 +1049,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 		goto out;
 	}
-	npinfo->rx_flags = 0;
 	INIT_LIST_HEAD(&npinfo->rx_np);
 	spin_lock_init(&npinfo->rx_lock);
@@ -1076,7 +1074,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 	if (np->rx_skb_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		list_add_tail(&np->rx, &npinfo->rx_np);
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -1258,8 +1255,6 @@ void __netpoll_cleanup(struct netpoll *np)
 	if (!list_empty(&npinfo->rx_np)) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		list_del(&np->rx);
-		if (list_empty(&npinfo->rx_np))
-			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
...