Commit 0f37666d authored by David S. Miller

Merge branch 'net-avoid-slow-rcu'

Eric Dumazet says:

====================
net: avoid slow rcu synchronizations in cleanup_net()

RTNL is a contended mutex, we prefer to expedite rcu synchronizations
in contexts we hold RTNL.

Similarly, cleanup_net() is a single threaded critical component and
should also use synchronize_rcu_expedited() even when not holding RTNL.

First patch removes a barrier with no clear purpose in ipv6_mc_down().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 03f568a1 1ebb85f9
...@@ -841,7 +841,7 @@ void br_vlan_flush(struct net_bridge *br) ...@@ -841,7 +841,7 @@ void br_vlan_flush(struct net_bridge *br)
vg = br_vlan_group(br); vg = br_vlan_group(br);
__vlan_flush(br, NULL, vg); __vlan_flush(br, NULL, vg);
RCU_INIT_POINTER(br->vlgrp, NULL); RCU_INIT_POINTER(br->vlgrp, NULL);
synchronize_rcu(); synchronize_net();
__vlan_group_free(vg); __vlan_group_free(vg);
} }
...@@ -1372,7 +1372,7 @@ void nbp_vlan_flush(struct net_bridge_port *port) ...@@ -1372,7 +1372,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
vg = nbp_vlan_group(port); vg = nbp_vlan_group(port);
__vlan_flush(port->br, port, vg); __vlan_flush(port->br, port, vg);
RCU_INIT_POINTER(port->vlgrp, NULL); RCU_INIT_POINTER(port->vlgrp, NULL);
synchronize_rcu(); synchronize_net();
__vlan_group_free(vg); __vlan_group_free(vg);
} }
......
...@@ -1239,7 +1239,7 @@ int dev_change_name(struct net_device *dev, const char *newname) ...@@ -1239,7 +1239,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
netdev_name_node_del(dev->name_node); netdev_name_node_del(dev->name_node);
write_unlock(&dev_base_lock); write_unlock(&dev_base_lock);
synchronize_rcu(); synchronize_net();
write_lock(&dev_base_lock); write_lock(&dev_base_lock);
netdev_name_node_add(net, dev->name_node); netdev_name_node_add(net, dev->name_node);
......
...@@ -622,7 +622,7 @@ static void cleanup_net(struct work_struct *work) ...@@ -622,7 +622,7 @@ static void cleanup_net(struct work_struct *work)
* the rcu_barrier() below isn't sufficient alone. * the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier. * Also the pre_exit() and exit() methods need this barrier.
*/ */
synchronize_rcu(); synchronize_rcu_expedited();
rtnl_lock(); rtnl_lock();
list_for_each_entry_reverse(ops, &pernet_list, list) { list_for_each_entry_reverse(ops, &pernet_list, list) {
......
...@@ -501,7 +501,7 @@ static void tnode_free(struct key_vector *tn) ...@@ -501,7 +501,7 @@ static void tnode_free(struct key_vector *tn)
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) { if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0; tnode_free_size = 0;
synchronize_rcu(); synchronize_net();
} }
} }
......
...@@ -2719,7 +2719,6 @@ void ipv6_mc_down(struct inet6_dev *idev) ...@@ -2719,7 +2719,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Should stop work after group drop. or we will /* Should stop work after group drop. or we will
* start work again in mld_ifc_event() * start work again in mld_ifc_event()
*/ */
synchronize_net();
mld_query_stop_work(idev); mld_query_stop_work(idev);
mld_report_stop_work(idev); mld_report_stop_work(idev);
......
...@@ -2530,7 +2530,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list) ...@@ -2530,7 +2530,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
* netfilter framework. Roll on, two-stage module * netfilter framework. Roll on, two-stage module
* delete... * delete...
*/ */
synchronize_net(); synchronize_rcu_expedited();
i_see_dead_people: i_see_dead_people:
busy = 0; busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) { list_for_each_entry(net, net_exit_list, exit_list) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment