Commit ffabe98c authored by Eric Dumazet, committed by David S. Miller

net: make dev_unreg_count global

We can use a global dev_unreg_count counter instead
of a per-netns one.

As a bonus we can factorize the changes done on it
for bulk device removals.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 45a96c40
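
Below is a minimal userspace sketch of the counting scheme, using C11 stdatomic as a stand-in for the kernel's atomic_t; the function and variable names are illustrative only, not kernel APIs. Producers publish a whole batch with a single atomic add, and the consumer retires a batch with a single subtract, learning from the old value whether it was the last work in flight:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the global dev_unreg_count. */
static atomic_int unreg_count;

/* Producer side: count devices locally, touch the shared counter once. */
static void queue_batch(int ndevices)
{
        int cnt = 0;

        for (int i = 0; i < ndevices; i++)
                cnt++;                          /* one net_set_todo() per device */
        atomic_fetch_add(&unreg_count, cnt);    /* one RMW for the whole batch */
}

/* Consumer side: retire a batch; true means the counter reached zero.
 * Models the kernel's atomic_sub_and_test(cnt, &dev_unreg_count):
 * fetch_sub returns the old value, so old == cnt means new == 0. */
static bool retire_batch(int cnt)
{
        return atomic_fetch_sub(&unreg_count, cnt) == cnt;
}

int main(void)
{
        queue_batch(3);
        queue_batch(2);
        printf("first batch last? %d\n", retire_batch(3));  /* 0: two devices left */
        printf("second batch last? %d\n", retire_batch(2)); /* 1: wake the waiters */
        return 0;
}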
@@ -47,6 +47,7 @@ extern int rtnl_lock_killable(void);
 extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
 
 extern wait_queue_head_t netdev_unregistering_wq;
+extern atomic_t dev_unreg_count;
 extern struct rw_semaphore pernet_ops_rwsem;
 extern struct rw_semaphore net_rwsem;
...
@@ -67,8 +67,6 @@ struct net {
 	 */
 	spinlock_t		rules_mod_lock;
 
-	atomic_t		dev_unreg_count;
-
 	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
 	u32			ifindex;
...
@@ -9698,11 +9698,11 @@ static void dev_index_release(struct net *net, int ifindex)
 /* Delayed registration/unregisteration */
 LIST_HEAD(net_todo_list);
 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+atomic_t dev_unreg_count = ATOMIC_INIT(0);
 
 static void net_set_todo(struct net_device *dev)
 {
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	atomic_inc(&dev_net(dev)->dev_unreg_count);
 }
@@ -10529,6 +10529,7 @@ void netdev_run_todo(void)
 {
 	struct net_device *dev, *tmp;
 	struct list_head list;
+	int cnt;
 #ifdef CONFIG_LOCKDEP
 	struct list_head unlink_list;
@@ -10565,6 +10566,7 @@ void netdev_run_todo(void)
 		linkwatch_sync_dev(dev);
 	}
 
+	cnt = 0;
 	while (!list_empty(&list)) {
 		dev = netdev_wait_allrefs_any(&list);
 		list_del(&dev->todo_list);
@@ -10582,12 +10584,13 @@ void netdev_run_todo(void)
 		if (dev->needs_free_netdev)
 			free_netdev(dev);
 
-		if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
-			wake_up(&netdev_unregistering_wq);
+		cnt++;
 
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
+	if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
+		wake_up(&netdev_unregistering_wq);
 }
 
 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
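
For a feel of what the batching buys, here is a toy single-threaded comparison, with plain ints and made-up numbers: with per-netns counters, the loop above could issue one wake_up() per namespace that drained to zero, while the global counter yields at most one wake_up() per netdev_run_todo() call:

#include <stdio.h>

int main(void)
{
        int per_ns[2] = { 3, 2 };   /* old: one counter per namespace */
        int global = 3 + 2;         /* new: one global counter */
        int old_wakeups = 0, new_wakeups = 0, cnt = 0;

        /* Old scheme: atomic_dec_and_test() per freed device. */
        for (int ns = 0; ns < 2; ns++) {
                int n = per_ns[ns];

                for (int d = 0; d < n; d++)
                        if (--per_ns[ns] == 0)
                                old_wakeups++;
        }

        /* New scheme: count freed devices, one sub-and-test per batch. */
        for (int d = 0; d < 5; d++)
                cnt++;
        global -= cnt;
        if (global == 0)
                new_wakeups++;

        printf("old: %d wakeups, new: %d wakeup\n", old_wakeups, new_wakeups);
        return 0;
}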
@@ -11034,6 +11037,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
 {
 	struct net_device *dev, *tmp;
 	LIST_HEAD(close_head);
+	int cnt = 0;
 
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
@@ -11130,7 +11134,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
 	list_for_each_entry(dev, head, unreg_list) {
 		netdev_put(dev, &dev->dev_registered_tracker);
 		net_set_todo(dev);
+		cnt++;
 	}
+	atomic_add(cnt, &dev_unreg_count);
 
 	list_del(head);
 }
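
With the counter global, unregister_netdevice_many_notify() can count the devices it queues under RTNL and publish the whole batch with a single atomic_add(), where net_set_todo() previously did one atomic_inc() per device: for N devices, one read-modify-write on a shared cacheline instead of N. This is the producer half of the sketch shown after the commit message.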
...
@@ -483,24 +483,15 @@ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
  */
 static void rtnl_lock_unregistering_all(void)
 {
-	struct net *net;
-	bool unregistering;
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	add_wait_queue(&netdev_unregistering_wq, &wait);
 	for (;;) {
-		unregistering = false;
-
 		rtnl_lock();
 		/* We held write locked pernet_ops_rwsem, and parallel
 		 * setup_net() and cleanup_net() are not possible.
 		 */
-		for_each_net(net) {
-			if (atomic_read(&net->dev_unreg_count) > 0) {
-				unregistering = true;
-				break;
-			}
-		}
-		if (!unregistering)
+		if (!atomic_read(&dev_unreg_count))
 			break;
 		__rtnl_unlock();
...
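
The waiter's loop above maps onto a familiar pattern. Here is a hedged userspace analogue, assuming POSIX threads: a mutex stands in for RTNL, a condition variable for netdev_unregistering_wq, and every name below is mine, not a kernel API. The waiter re-takes the lock, performs the single global check that replaced the old for_each_net() scan, and sleeps until a worker's batch drives the counter to zero:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define BATCH 4

static atomic_int unreg_count;
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;    /* stand-in for RTNL */
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t unregistering_wq = PTHREAD_COND_INITIALIZER;

/* Waiter: loop until nothing is unregistering, then keep "RTNL" held. */
static void lock_unregistering_all(void)
{
        pthread_mutex_lock(&wq_lock);          /* ~ add_wait_queue() */
        for (;;) {
                pthread_mutex_lock(&rtnl);
                /* One global check replaces the old per-netns scan. */
                if (!atomic_load(&unreg_count))
                        break;                 /* return with "RTNL" held */
                pthread_mutex_unlock(&rtnl);
                pthread_cond_wait(&unregistering_wq, &wq_lock); /* ~ wait_woken() */
        }
        pthread_mutex_unlock(&wq_lock);        /* ~ remove_wait_queue() */
}

/* Worker: retire a batch; wake waiters only if the counter hit zero. */
static void *run_todo(void *unused)
{
        (void)unused;
        sleep(1);                              /* pretend to tear devices down */
        if (atomic_fetch_sub(&unreg_count, BATCH) == BATCH) {
                pthread_mutex_lock(&wq_lock);
                pthread_cond_broadcast(&unregistering_wq);
                pthread_mutex_unlock(&wq_lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        atomic_store(&unreg_count, BATCH);
        pthread_create(&t, NULL, run_todo, NULL);
        lock_unregistering_all();
        printf("no unregistration pending, lock acquired\n");
        pthread_mutex_unlock(&rtnl);
        pthread_join(t, NULL);
        return 0;
}

Build with cc -std=c11 -pthread. Note the same ordering discipline as the kernel code: the waiter is registered on the wait queue before it checks the counter, so a wakeup between the check and the sleep cannot be lost.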