Commit dbaa1541 authored by Herbert Xu, committed by David S. Miller

netpoll: Add locking for netpoll_setup/cleanup

As it stands, netpoll_setup and netpoll_cleanup have no locking
protection whatsoever.  So chaos ensues if two entities try to
perform them on the same device.

This patch adds RTNL to the equation.  The code has been rearranged so
that bits that do not need RTNL protection are now moved to the top of
netpoll_setup.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent de85d99e
...@@ -698,7 +698,6 @@ int netpoll_setup(struct netpoll *np) ...@@ -698,7 +698,6 @@ int netpoll_setup(struct netpoll *np)
struct net_device *ndev = NULL; struct net_device *ndev = NULL;
struct in_device *in_dev; struct in_device *in_dev;
struct netpoll_info *npinfo; struct netpoll_info *npinfo;
struct netpoll *npe, *tmp;
unsigned long flags; unsigned long flags;
int err; int err;
...@@ -710,38 +709,6 @@ int netpoll_setup(struct netpoll *np) ...@@ -710,38 +709,6 @@ int netpoll_setup(struct netpoll *np)
return -ENODEV; return -ENODEV;
} }
np->dev = ndev;
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
goto put;
}
npinfo->rx_flags = 0;
INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
} else {
npinfo = ndev->npinfo;
atomic_inc(&npinfo->refcnt);
}
npinfo->netpoll = np;
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
err = -ENOTSUPP;
goto release;
}
if (!netif_running(ndev)) { if (!netif_running(ndev)) {
unsigned long atmost, atleast; unsigned long atmost, atleast;
...@@ -755,7 +722,7 @@ int netpoll_setup(struct netpoll *np) ...@@ -755,7 +722,7 @@ int netpoll_setup(struct netpoll *np)
if (err) { if (err) {
printk(KERN_ERR "%s: failed to open %s\n", printk(KERN_ERR "%s: failed to open %s\n",
np->name, ndev->name); np->name, ndev->name);
goto release; goto put;
} }
atleast = jiffies + HZ/10; atleast = jiffies + HZ/10;
...@@ -792,7 +759,7 @@ int netpoll_setup(struct netpoll *np) ...@@ -792,7 +759,7 @@ int netpoll_setup(struct netpoll *np)
printk(KERN_ERR "%s: no IP address for %s, aborting\n", printk(KERN_ERR "%s: no IP address for %s, aborting\n",
np->name, np->dev_name); np->name, np->dev_name);
err = -EDESTADDRREQ; err = -EDESTADDRREQ;
goto release; goto put;
} }
np->local_ip = in_dev->ifa_list->ifa_local; np->local_ip = in_dev->ifa_list->ifa_local;
...@@ -800,6 +767,43 @@ int netpoll_setup(struct netpoll *np) ...@@ -800,6 +767,43 @@ int netpoll_setup(struct netpoll *np)
printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
} }
np->dev = ndev;
/* fill up the skb queue */
refill_skbs();
rtnl_lock();
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
err = -ENOTSUPP;
goto unlock;
}
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
goto unlock;
}
npinfo->rx_flags = 0;
INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
} else {
npinfo = ndev->npinfo;
atomic_inc(&npinfo->refcnt);
}
npinfo->netpoll = np;
if (np->rx_hook) { if (np->rx_hook) {
spin_lock_irqsave(&npinfo->rx_lock, flags); spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_flags |= NETPOLL_RX_ENABLED; npinfo->rx_flags |= NETPOLL_RX_ENABLED;
...@@ -807,24 +811,14 @@ int netpoll_setup(struct netpoll *np) ...@@ -807,24 +811,14 @@ int netpoll_setup(struct netpoll *np)
spin_unlock_irqrestore(&npinfo->rx_lock, flags); spin_unlock_irqrestore(&npinfo->rx_lock, flags);
} }
/* fill up the skb queue */
refill_skbs();
/* last thing to do is link it to the net device structure */ /* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo); rcu_assign_pointer(ndev->npinfo, npinfo);
rtnl_unlock();
return 0; return 0;
release: unlock:
if (!ndev->npinfo) { rtnl_unlock();
spin_lock_irqsave(&npinfo->rx_lock, flags);
list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
npe->dev = NULL;
}
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
kfree(npinfo);
}
put: put:
dev_put(ndev); dev_put(ndev);
return err; return err;
...@@ -841,8 +835,12 @@ void netpoll_cleanup(struct netpoll *np) ...@@ -841,8 +835,12 @@ void netpoll_cleanup(struct netpoll *np)
{ {
struct netpoll_info *npinfo; struct netpoll_info *npinfo;
unsigned long flags; unsigned long flags;
int free = 0;
if (np->dev) { if (!np->dev)
return;
rtnl_lock();
npinfo = np->dev->npinfo; npinfo = np->dev->npinfo;
if (npinfo) { if (npinfo) {
if (!list_empty(&npinfo->rx_np)) { if (!list_empty(&npinfo->rx_np)) {
...@@ -853,7 +851,8 @@ void netpoll_cleanup(struct netpoll *np) ...@@ -853,7 +851,8 @@ void netpoll_cleanup(struct netpoll *np)
spin_unlock_irqrestore(&npinfo->rx_lock, flags); spin_unlock_irqrestore(&npinfo->rx_lock, flags);
} }
if (atomic_dec_and_test(&npinfo->refcnt)) { free = atomic_dec_and_test(&npinfo->refcnt);
if (free) {
const struct net_device_ops *ops; const struct net_device_ops *ops;
ops = np->dev->netdev_ops; ops = np->dev->netdev_ops;
...@@ -861,7 +860,11 @@ void netpoll_cleanup(struct netpoll *np) ...@@ -861,7 +860,11 @@ void netpoll_cleanup(struct netpoll *np)
ops->ndo_netpoll_cleanup(np->dev); ops->ndo_netpoll_cleanup(np->dev);
rcu_assign_pointer(np->dev->npinfo, NULL); rcu_assign_pointer(np->dev->npinfo, NULL);
}
}
rtnl_unlock();
if (free) {
/* avoid racing with NAPI reading npinfo */ /* avoid racing with NAPI reading npinfo */
synchronize_rcu_bh(); synchronize_rcu_bh();
...@@ -873,10 +876,8 @@ void netpoll_cleanup(struct netpoll *np) ...@@ -873,10 +876,8 @@ void netpoll_cleanup(struct netpoll *np)
__skb_queue_purge(&npinfo->txq); __skb_queue_purge(&npinfo->txq);
kfree(npinfo); kfree(npinfo);
} }
}
dev_put(np->dev); dev_put(np->dev);
}
np->dev = NULL; np->dev = NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment