Commit d9452e9f authored by David S. Miller

[NETPOLL]: Revert two bogus cleanups that broke netconsole.

Based upon a report by Andrew Morton and code analysis done
by Jarek Poplawski.

This reverts 33f807ba ("[NETPOLL]: Kill NETPOLL_RX_DROP, set but never
tested.") and c7b6ea24 ("[NETPOLL]: Don't need rx_flags.").

The rx_flags did get tested for zero vs. non-zero, and therefore we do
need those tests and the code that sets NETPOLL_RX_DROP et al.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ec9b6add
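For context before the diff: below is a minimal userspace sketch of the behaviour this revert restores. It is not the kernel code — struct netpoll_info_model, netpoll_rx_model() and the main() driver are illustrative names only, and the rx_lock, sk_buffs and the real __netpoll_rx()/trapped handling are omitted — but it shows the zero vs. non-zero rx_flags test that the hunks below put back: while poll_one_napi() has NETPOLL_RX_DROP set, packets have to be diverted to netpoll even though no rx_hook (rx_np) is registered.

/*
 * Minimal userspace model of the restored rx_flags gating.  Not the
 * kernel code: locking, sk_buffs and __netpoll_rx() are omitted, and
 * the names below are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

struct netpoll_info_model {
	int rx_flags;
	void *rx_np;	/* non-NULL iff an rx_hook is registered */
};

/* Mirrors the restored fast-path test in netpoll_rx(): divert the packet
 * to netpoll if either a receive hook is registered or any rx_flags bit
 * is set; otherwise hand it to the normal stack. */
static bool netpoll_rx_model(const struct netpoll_info_model *npinfo)
{
	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return false;	/* fast path: packet goes to the stack */
	return true;		/* packet would be handed to __netpoll_rx() */
}

int main(void)
{
	struct netpoll_info_model npinfo = { .rx_flags = 0, .rx_np = NULL };

	/* Idle: nothing set, packets go to the normal stack. */
	printf("idle:        trapped=%d\n", netpoll_rx_model(&npinfo));

	/* poll_one_napi() brackets napi->poll() with NETPOLL_RX_DROP. */
	npinfo.rx_flags |= NETPOLL_RX_DROP;
	printf("during poll: trapped=%d\n", netpoll_rx_model(&npinfo));
	npinfo.rx_flags &= ~NETPOLL_RX_DROP;

	/* With the two reverted cleanups applied, the rx_flags test was
	 * gone, so the "during poll" case above behaved like the idle
	 * case instead of being treated specially. */
	return 0;
}

Built with a plain C compiler, this prints trapped=0 for the idle case and trapped=1 while NETPOLL_RX_DROP is set; with the reverted cleanups applied the second case behaved like the first, which matches the breakage described above.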
@@ -25,6 +25,7 @@ struct netpoll {
 
 struct netpoll_info {
 	atomic_t refcnt;
+	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
@@ -50,12 +51,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	int ret = 0;
 
-	if (!npinfo || !npinfo->rx_np)
+	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
 		return 0;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
-	/* check rx_np again with the lock held */
-	if (npinfo->rx_np && __netpoll_rx(skb))
+	/* check rx_flags again with the lock held */
+	if (npinfo->rx_flags && __netpoll_rx(skb))
 		ret = 1;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL	50
+#define NETPOLL_RX_ENABLED  1
+#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE \
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
 		return budget;
 
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
 
 	work = napi->poll(napi, budget);
 
 	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
 	return budget - work;
 }
@@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb)
 	if (skb->dev->type != ARPHRD_ETHER)
 		goto out;
 
-	/* if receive ARP during middle of NAPI poll, then queue */
+	/* check if netpoll clients need ARP */
 	if (skb->protocol == htons(ETH_P_ARP) &&
 	    atomic_read(&trapped)) {
 		skb_queue_tail(&npi->arp_tx, skb);
@@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb)
 	return 1;
 
 out:
-	/* If packet received while already in poll then just
-	 * silently drop.
-	 */
 	if (atomic_read(&trapped)) {
 		kfree_skb(skb);
 		return 1;
@@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np)
 			goto release;
 		}
 
+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 
 		spin_lock_init(&npinfo->rx_lock);
@@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np)
 			if (npinfo->rx_np == np) {
 				spin_lock_irqsave(&npinfo->rx_lock, flags);
 				npinfo->rx_np = NULL;
+				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 			}