Commit 1928ecab authored by Johannes Berg, committed by John W. Linville

mac80211: fix and simplify mesh locking

The locking in mesh_{mpath,mpp}_table_grow is not
only missing an rcu_read_unlock(), it is also racy
(though only technically, since it is invoked from
a single function): it obtains the new size of the
table without any locking, so two invocations of
the function could attempt the same resize.

Additionally, it uses synchronize_rcu(), which is
rather expensive and can be avoided trivially here.

Modify the functions to only use the table lock
and use call_rcu() instead of synchronize_rcu().
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent d07c7cf4
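
The fix below follows the usual RCU writer pattern: the new size is read and the table pointer is swapped entirely under the writers' lock, the new table is published with rcu_assign_pointer(), and the old one is retired with call_rcu() so the caller never blocks in synchronize_rcu(). As a rough, self-contained sketch of that pattern (hypothetical foo_table names, not the mac80211 code; the copy/rehash step is elided):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical "foo" table, not the mac80211 structures. */
struct foo_table {
        int size_order;
        struct rcu_head rcu_head;
        /* ... hash buckets ... */
};

static struct foo_table *the_table;     /* readers dereference under RCU */
static DEFINE_SPINLOCK(resize_lock);    /* serializes writers */

static void foo_table_free_rcu(struct rcu_head *rcu)
{
        /* Runs only after all readers that could still see the
         * old table have left their RCU read-side sections. */
        kfree(container_of(rcu, struct foo_table, rcu_head));
}

static void foo_table_grow(void)
{
        struct foo_table *oldtbl, *newtbl;

        spin_lock_bh(&resize_lock);
        oldtbl = the_table;     /* stable: writers hold resize_lock */
        newtbl = kzalloc(sizeof(*newtbl), GFP_ATOMIC);
        if (!newtbl)
                goto out;
        newtbl->size_order = oldtbl->size_order + 1;
        /* ... copy/rehash entries from oldtbl into newtbl ... */
        rcu_assign_pointer(the_table, newtbl);  /* publish */
        /* Retire the old table without blocking the caller. */
        call_rcu(&oldtbl->rcu_head, foo_table_free_rcu);
out:
        spin_unlock_bh(&resize_lock);
}

Because the size is read and the pointer swapped under the same lock, two concurrent callers cannot attempt the same resize; call_rcu() then queues the free to run after a grace period, so the writer never sleeps.
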
@@ -120,6 +120,7 @@ struct mesh_path {
  *      buckets
  * @mean_chain_len: maximum average length for the hash buckets' list, if it is
  *      reached, the table will grow
+ * rcu_head: RCU head to free the table
  */
 struct mesh_table {
         /* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
         int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
         int size_order;
         int mean_chain_len;
+
+        struct rcu_head rcu_head;
 };
 
 /* Recent multicast cache */
@@ -370,52 +370,52 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
         return err;
 }
 
+static void mesh_table_free_rcu(struct rcu_head *rcu)
+{
+        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
+
+        mesh_table_free(tbl, false);
+}
+
 void mesh_mpath_table_grow(void)
 {
         struct mesh_table *oldtbl, *newtbl;
 
-        rcu_read_lock();
-        newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
-        if (!newtbl)
-                return;
         write_lock_bh(&pathtbl_resize_lock);
+        newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
+        if (!newtbl)
+                goto out;
         oldtbl = mesh_paths;
         if (mesh_table_grow(mesh_paths, newtbl) < 0) {
-                rcu_read_unlock();
                 __mesh_table_free(newtbl);
-                write_unlock_bh(&pathtbl_resize_lock);
-                return;
+                goto out;
         }
-        rcu_read_unlock();
         rcu_assign_pointer(mesh_paths, newtbl);
-        write_unlock_bh(&pathtbl_resize_lock);
 
-        synchronize_rcu();
-        mesh_table_free(oldtbl, false);
+        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+out:
+        write_unlock_bh(&pathtbl_resize_lock);
 }
 
 void mesh_mpp_table_grow(void)
 {
         struct mesh_table *oldtbl, *newtbl;
 
-        rcu_read_lock();
-        newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
-        if (!newtbl)
-                return;
         write_lock_bh(&pathtbl_resize_lock);
+        newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
+        if (!newtbl)
+                goto out;
         oldtbl = mpp_paths;
         if (mesh_table_grow(mpp_paths, newtbl) < 0) {
-                rcu_read_unlock();
                 __mesh_table_free(newtbl);
-                write_unlock_bh(&pathtbl_resize_lock);
-                return;
+                goto out;
         }
-        rcu_read_unlock();
         rcu_assign_pointer(mpp_paths, newtbl);
-        write_unlock_bh(&pathtbl_resize_lock);
 
-        synchronize_rcu();
-        mesh_table_free(oldtbl, false);
+        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+out:
+        write_unlock_bh(&pathtbl_resize_lock);
 }
 
 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
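
For completeness, the reason call_rcu() is sufficient lies on the reader side: lookups pick up the table pointer with rcu_dereference() inside an rcu_read_lock()/rcu_read_unlock() section, and the old table cannot be freed until every such section that might still see it has ended. A minimal reader for the hypothetical foo_table sketch above might look like:

/* Hypothetical reader, matching the foo_table sketch above. */
static int foo_table_size_order(void)
{
        struct foo_table *tbl;
        int order;

        rcu_read_lock();
        tbl = rcu_dereference(the_table);       /* old or new table */
        order = tbl->size_order;        /* tbl stays allocated until unlock */
        rcu_read_unlock();

        return order;
}

Anything the reader needs must be copied out before rcu_read_unlock(); after that point the call_rcu() callback is free to release the old table.
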