Commit 60854fd9 authored by Bob Copeland, committed by Johannes Berg

mac80211: mesh: convert path table to rhashtable

In the time since the mesh path table was implemented as an
RCU-traversable, dynamically growing hash table, a generic RCU
hashtable implementation was added to the kernel.

Switch the mesh path table over to rhashtable to remove some code
and also gain some features like automatic shrinking.

Cc: Thomas Graf <tgraf@suug.ch>
Cc: netdev@vger.kernel.org
Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 8f6fd83c
...@@ -697,17 +697,10 @@ struct ieee80211_if_mesh { ...@@ -697,17 +697,10 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */ /* offset from skb->data while building IE */
int meshconf_offset; int meshconf_offset;
struct mesh_table __rcu *mesh_paths; struct mesh_table *mesh_paths;
struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation; int mesh_paths_generation;
int mpp_paths_generation; int mpp_paths_generation;
/* Protects assignment of the mesh_paths/mpp_paths table
* pointer for resize against reading it for add/delete
* of individual paths. Pure readers (lookups) just use
* RCU.
*/
rwlock_t pathtbl_resize_lock;
}; };
#ifdef CONFIG_MAC80211_MESH #ifdef CONFIG_MAC80211_MESH
......
...@@ -1347,12 +1347,6 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) ...@@ -1347,12 +1347,6 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
mesh_path_start_discovery(sdata); mesh_path_start_discovery(sdata);
if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
mesh_mpath_table_grow(sdata);
if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
mesh_mpp_table_grow(sdata);
if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
ieee80211_mesh_housekeeping(sdata); ieee80211_mesh_housekeeping(sdata);
......
...@@ -51,10 +51,6 @@ enum mesh_path_flags { ...@@ -51,10 +51,6 @@ enum mesh_path_flags {
* *
* *
* @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
* @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
* to grow.
* @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
* grow
* @MESH_WORK_ROOT: the mesh root station needs to send a frame * @MESH_WORK_ROOT: the mesh root station needs to send a frame
* @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
* mesh nodes * mesh nodes
...@@ -62,8 +58,6 @@ enum mesh_path_flags { ...@@ -62,8 +58,6 @@ enum mesh_path_flags {
*/ */
enum mesh_deferred_task_flags { enum mesh_deferred_task_flags {
MESH_WORK_HOUSEKEEPING, MESH_WORK_HOUSEKEEPING,
MESH_WORK_GROW_MPATH_TABLE,
MESH_WORK_GROW_MPP_TABLE,
MESH_WORK_ROOT, MESH_WORK_ROOT,
MESH_WORK_DRIFT_ADJUST, MESH_WORK_DRIFT_ADJUST,
MESH_WORK_MBSS_CHANGED, MESH_WORK_MBSS_CHANGED,
...@@ -105,6 +99,7 @@ enum mesh_deferred_task_flags { ...@@ -105,6 +99,7 @@ enum mesh_deferred_task_flags {
struct mesh_path { struct mesh_path {
u8 dst[ETH_ALEN]; u8 dst[ETH_ALEN];
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
struct rhash_head rhash;
struct hlist_node gate_list; struct hlist_node gate_list;
struct ieee80211_sub_if_data *sdata; struct ieee80211_sub_if_data *sdata;
struct sta_info __rcu *next_hop; struct sta_info __rcu *next_hop;
...@@ -129,34 +124,17 @@ struct mesh_path { ...@@ -129,34 +124,17 @@ struct mesh_path {
/** /**
* struct mesh_table * struct mesh_table
* *
* @hash_buckets: array of hash buckets of the table
* @hashwlock: array of locks to protect write operations, one per bucket
* @hash_mask: 2^size_order - 1, used to compute hash idx
* @hash_rnd: random value used for hash computations
* @entries: number of entries in the table * @entries: number of entries in the table
* @free_node: function to free nodes of the table
* @copy_node: function to copy nodes of the table
* @size_order: determines size of the table, there will be 2^size_order hash
* buckets
* @known_gates: list of known mesh gates and their mpaths by the station. The * @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active. * gate's mpath may or may not be resolved and active.
* * @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr
* rcu_head: RCU head to free the table
*/ */
struct mesh_table { struct mesh_table {
/* Number of buckets will be 2^N */
struct hlist_head *hash_buckets;
spinlock_t *hashwlock; /* One per bucket, for add/del */
unsigned int hash_mask; /* (2^size_order) - 1 */
__u32 hash_rnd; /* Used for hash generation */
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
void (*free_node) (struct hlist_node *p, bool free_leafs);
int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
int size_order;
struct hlist_head *known_gates; struct hlist_head *known_gates;
spinlock_t gates_lock; spinlock_t gates_lock;
struct rcu_head rcu_head; struct rhashtable rhead;
}; };
/* Recent multicast cache */ /* Recent multicast cache */
...@@ -300,9 +278,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, ...@@ -300,9 +278,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
void mesh_sta_cleanup(struct sta_info *sta); void mesh_sta_cleanup(struct sta_info *sta);
/* Private interfaces */ /* Private interfaces */
/* Mesh tables */
void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata);
void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata);
/* Mesh paths */ /* Mesh paths */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
u8 ttl, const u8 *target, u32 target_sn, u8 ttl, const u8 *target, u32 target_sn,
......
...@@ -18,11 +18,20 @@ ...@@ -18,11 +18,20 @@
#include "ieee80211_i.h" #include "ieee80211_i.h"
#include "mesh.h" #include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
#define INIT_PATHS_SIZE_ORDER 2 {
/* Use last four bytes of hw addr as hash index */
return jhash_1word(*(u32 *)(addr+2), seed);
}
/* Keep the mean chain length below this constant */ static const struct rhashtable_params mesh_rht_params = {
#define MEAN_CHAIN_LEN 2 .nelem_hint = 2,
.automatic_shrinking = true,
.key_len = ETH_ALEN,
.key_offset = offsetof(struct mesh_path, dst),
.head_offset = offsetof(struct mesh_path, rhash),
.hashfn = mesh_table_hash,
};
static inline bool mpath_expired(struct mesh_path *mpath) static inline bool mpath_expired(struct mesh_path *mpath)
{ {
...@@ -31,157 +40,47 @@ static inline bool mpath_expired(struct mesh_path *mpath) ...@@ -31,157 +40,47 @@ static inline bool mpath_expired(struct mesh_path *mpath)
!(mpath->flags & MESH_PATH_FIXED); !(mpath->flags & MESH_PATH_FIXED);
} }
struct mpath_node { static void mesh_path_reclaim(struct rcu_head *rp)
struct hlist_node list;
struct rcu_head rcu;
/* This indirection allows two different tables to point to the same
* mesh_path structure, useful when resizing
*/
struct mesh_path *mpath;
};
static inline struct mesh_table *resize_dereference_paths(
struct ieee80211_sub_if_data *sdata,
struct mesh_table __rcu *table)
{ {
return rcu_dereference_protected(table, struct mesh_path *mpath = container_of(rp, struct mesh_path, rcu);
lockdep_is_held(&sdata->u.mesh.pathtbl_resize_lock));
}
static inline struct mesh_table *resize_dereference_mesh_paths( del_timer_sync(&mpath->timer);
struct ieee80211_sub_if_data *sdata) kfree(mpath);
{
return resize_dereference_paths(sdata, sdata->u.mesh.mesh_paths);
} }
static inline struct mesh_table *resize_dereference_mpp_paths( static void mesh_path_rht_free(void *ptr, void *unused_arg)
struct ieee80211_sub_if_data *sdata)
{ {
return resize_dereference_paths(sdata, sdata->u.mesh.mpp_paths); struct mesh_path *mpath = ptr;
call_rcu(&mpath->rcu, mesh_path_reclaim);
} }
/* static struct mesh_table *mesh_table_alloc(void)
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
* it's used twice. So it is illegal to do
* for_each_mesh_entry(rcu_dereference(...), ...)
*/
#define for_each_mesh_entry(tbl, node, i) \
for (i = 0; i <= tbl->hash_mask; i++) \
hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
static struct mesh_table *mesh_table_alloc(int size_order)
{ {
int i;
struct mesh_table *newtbl; struct mesh_table *newtbl;
newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
if (!newtbl) if (!newtbl)
return NULL; return NULL;
newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
(1 << size_order), GFP_ATOMIC); if (!newtbl->known_gates) {
if (!newtbl->hash_buckets) {
kfree(newtbl);
return NULL;
}
newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
(1 << size_order), GFP_ATOMIC);
if (!newtbl->hashwlock) {
kfree(newtbl->hash_buckets);
kfree(newtbl); kfree(newtbl);
return NULL; return NULL;
} }
INIT_HLIST_HEAD(newtbl->known_gates);
newtbl->size_order = size_order;
newtbl->hash_mask = (1 << size_order) - 1;
atomic_set(&newtbl->entries, 0); atomic_set(&newtbl->entries, 0);
get_random_bytes(&newtbl->hash_rnd,
sizeof(newtbl->hash_rnd));
for (i = 0; i <= newtbl->hash_mask; i++)
spin_lock_init(&newtbl->hashwlock[i]);
spin_lock_init(&newtbl->gates_lock); spin_lock_init(&newtbl->gates_lock);
return newtbl; return newtbl;
} }
static void __mesh_table_free(struct mesh_table *tbl) static void mesh_table_free(struct mesh_table *tbl)
{ {
kfree(tbl->hash_buckets); rhashtable_free_and_destroy(&tbl->rhead,
kfree(tbl->hashwlock); mesh_path_rht_free, NULL);
kfree(tbl); kfree(tbl);
} }
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
struct mesh_path *gate;
int i;
mesh_hash = tbl->hash_buckets;
if (free_leafs) {
spin_lock_bh(&tbl->gates_lock);
hlist_for_each_entry_safe(gate, q,
tbl->known_gates, gate_list)
hlist_del(&gate->gate_list);
kfree(tbl->known_gates);
spin_unlock_bh(&tbl->gates_lock);
}
for (i = 0; i <= tbl->hash_mask; i++) {
spin_lock_bh(&tbl->hashwlock[i]);
hlist_for_each_safe(p, q, &mesh_hash[i]) {
tbl->free_node(p, free_leafs);
atomic_dec(&tbl->entries);
}
spin_unlock_bh(&tbl->hashwlock[i]);
}
__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
struct mesh_table *newtbl)
{
struct hlist_head *oldhash;
struct hlist_node *p, *q;
int i;
if (atomic_read(&oldtbl->entries)
< MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
return -EAGAIN;
newtbl->free_node = oldtbl->free_node;
newtbl->copy_node = oldtbl->copy_node;
newtbl->known_gates = oldtbl->known_gates;
atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
oldhash = oldtbl->hash_buckets;
for (i = 0; i <= oldtbl->hash_mask; i++)
hlist_for_each(p, &oldhash[i])
if (oldtbl->copy_node(p, newtbl) < 0)
goto errcopy;
return 0;
errcopy:
for (i = 0; i <= newtbl->hash_mask; i++) {
hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
oldtbl->free_node(p, 0);
}
return -ENOMEM;
}
static u32 mesh_table_hash(const u8 *addr, struct mesh_table *tbl)
{
/* Use last four bytes of hw addr as hash index */
return jhash_1word(*(u32 *)(addr+2), tbl->hash_rnd) & tbl->hash_mask;
}
/** /**
* *
* mesh_path_assign_nexthop - update mesh path next hop * mesh_path_assign_nexthop - update mesh path next hop
...@@ -324,22 +223,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, ...@@ -324,22 +223,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct ieee80211_sub_if_data *sdata) struct ieee80211_sub_if_data *sdata)
{ {
struct mesh_path *mpath; struct mesh_path *mpath;
struct hlist_head *bucket;
struct mpath_node *node;
bucket = &tbl->hash_buckets[mesh_table_hash(dst, tbl)]; mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);
hlist_for_each_entry_rcu(node, bucket, list) {
mpath = node->mpath; if (mpath && mpath_expired(mpath)) {
if (ether_addr_equal(dst, mpath->dst)) {
if (mpath_expired(mpath)) {
spin_lock_bh(&mpath->state_lock); spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE; mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock); spin_unlock_bh(&mpath->state_lock);
} }
return mpath; return mpath;
}
}
return NULL;
} }
/** /**
...@@ -354,17 +246,52 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, ...@@ -354,17 +246,52 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path * struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{ {
return mpath_lookup(rcu_dereference(sdata->u.mesh.mesh_paths), dst, return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
sdata);
} }
struct mesh_path * struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{ {
return mpath_lookup(rcu_dereference(sdata->u.mesh.mpp_paths), dst, return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
sdata);
} }
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
int i = 0, ret;
struct mesh_path *mpath = NULL;
struct rhashtable_iter iter;
ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
if (ret)
return NULL;
ret = rhashtable_walk_start(&iter);
if (ret && ret != -EAGAIN)
goto err;
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
if (IS_ERR(mpath))
break;
if (i++ == idx)
break;
}
err:
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
if (IS_ERR(mpath) || !mpath)
return NULL;
if (mpath_expired(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
}
/** /**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
...@@ -378,23 +305,7 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) ...@@ -378,23 +305,7 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
struct mesh_path * struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{ {
struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mesh_paths); return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
struct mpath_node *node;
int i;
int j = 0;
for_each_mesh_entry(tbl, node, i) {
if (j++ == idx) {
if (mpath_expired(node->mpath)) {
spin_lock_bh(&node->mpath->state_lock);
node->mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&node->mpath->state_lock);
}
return node->mpath;
}
}
return NULL;
} }
/** /**
...@@ -409,17 +320,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) ...@@ -409,17 +320,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path * struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{ {
struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mpp_paths); return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
struct mpath_node *node;
int i;
int j = 0;
for_each_mesh_entry(tbl, node, i) {
if (j++ == idx)
return node->mpath;
}
return NULL;
} }
/** /**
...@@ -432,7 +333,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) ...@@ -432,7 +333,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err; int err;
rcu_read_lock(); rcu_read_lock();
tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths); tbl = mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock); spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) { if (mpath->is_gate) {
...@@ -526,15 +427,9 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata, ...@@ -526,15 +427,9 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst) const u8 *dst)
{ {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
struct mesh_table *tbl; struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath; struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node; int ret;
struct hlist_head *bucket;
int grow = 0;
int err;
u32 hash_idx;
if (ether_addr_equal(dst, sdata->vif.addr)) if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */ /* never add ourselves as neighbours */
...@@ -546,116 +441,44 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, ...@@ -546,116 +441,44 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
return ERR_PTR(-ENOSPC); return ERR_PTR(-ENOSPC);
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
tbl = resize_dereference_mesh_paths(sdata);
hash_idx = mesh_table_hash(dst, tbl);
bucket = &tbl->hash_buckets[hash_idx];
spin_lock(&tbl->hashwlock[hash_idx]);
hlist_for_each_entry(node, bucket, list) {
mpath = node->mpath;
if (ether_addr_equal(dst, mpath->dst))
goto found;
}
err = -ENOMEM;
new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
if (!new_mpath) if (!new_mpath)
goto err_path_alloc; return ERR_PTR(-ENOMEM);
new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); tbl = sdata->u.mesh.mesh_paths;
if (!new_node) do {
goto err_node_alloc; ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
new_node->mpath = new_mpath; if (ret == -EEXIST)
hlist_add_head_rcu(&new_node->list, bucket); mpath = rhashtable_lookup_fast(&tbl->rhead,
if (atomic_inc_return(&tbl->entries) >= dst,
MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) mesh_rht_params);
grow = 1;
sdata->u.mesh.mesh_paths_generation++; } while (unlikely(ret == -EEXIST && !mpath));
if (grow) { if (ret && ret != -EEXIST)
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); return ERR_PTR(ret);
ieee80211_queue_work(&local->hw, &sdata->work);
}
mpath = new_mpath;
found:
spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
return mpath;
err_node_alloc: /* At this point either new_mpath was added, or we found a
* matching entry already in the table; in the latter case
* free the unnecessary new entry.
*/
if (ret == -EEXIST) {
kfree(new_mpath); kfree(new_mpath);
err_path_alloc: new_mpath = mpath;
atomic_dec(&sdata->u.mesh.mpaths);
spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
return ERR_PTR(err);
}
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *oldtbl, *newtbl;
write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
oldtbl = resize_dereference_mesh_paths(sdata);
newtbl = mesh_table_alloc(oldtbl->size_order + 1);
if (!newtbl)
goto out;
if (mesh_table_grow(oldtbl, newtbl) < 0) {
__mesh_table_free(newtbl);
goto out;
} }
rcu_assign_pointer(sdata->u.mesh.mesh_paths, newtbl); sdata->u.mesh.mesh_paths_generation++;
return new_mpath;
call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
out:
write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
}
void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *oldtbl, *newtbl;
write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
oldtbl = resize_dereference_mpp_paths(sdata);
newtbl = mesh_table_alloc(oldtbl->size_order + 1);
if (!newtbl)
goto out;
if (mesh_table_grow(oldtbl, newtbl) < 0) {
__mesh_table_free(newtbl);
goto out;
}
rcu_assign_pointer(sdata->u.mesh.mpp_paths, newtbl);
call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
out:
write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
} }
int mpp_path_add(struct ieee80211_sub_if_data *sdata, int mpp_path_add(struct ieee80211_sub_if_data *sdata,
const u8 *dst, const u8 *mpp) const u8 *dst, const u8 *mpp)
{ {
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
struct mesh_table *tbl; struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath; struct mesh_path *new_mpath;
struct mpath_node *node, *new_node; int ret;
struct hlist_head *bucket;
int grow = 0;
int err = 0;
u32 hash_idx;
if (ether_addr_equal(dst, sdata->vif.addr)) if (ether_addr_equal(dst, sdata->vif.addr))
/* never add ourselves as neighbours */ /* never add ourselves as neighbours */
...@@ -664,56 +487,19 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, ...@@ -664,56 +487,19 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(dst)) if (is_multicast_ether_addr(dst))
return -ENOTSUPP; return -ENOTSUPP;
err = -ENOMEM;
new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); if (!new_mpath)
if (!new_node) return -ENOMEM;
goto err_node_alloc;
memcpy(new_mpath->mpp, mpp, ETH_ALEN); memcpy(new_mpath->mpp, mpp, ETH_ALEN);
new_node->mpath = new_mpath; tbl = sdata->u.mesh.mpp_paths;
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead,
tbl = resize_dereference_mpp_paths(sdata); &new_mpath->rhash,
mesh_rht_params);
hash_idx = mesh_table_hash(dst, tbl);
bucket = &tbl->hash_buckets[hash_idx];
spin_lock(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, bucket, list) {
mpath = node->mpath;
if (ether_addr_equal(dst, mpath->dst))
goto err_exists;
}
hlist_add_head_rcu(&new_node->list, bucket);
if (atomic_inc_return(&tbl->entries) >=
MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
grow = 1;
spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
sdata->u.mesh.mpp_paths_generation++; sdata->u.mesh.mpp_paths_generation++;
return ret;
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
err_exists:
spin_unlock(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
kfree(new_mpath);
err_path_alloc:
return err;
} }
...@@ -727,17 +513,26 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, ...@@ -727,17 +513,26 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
*/ */
void mesh_plink_broken(struct sta_info *sta) void mesh_plink_broken(struct sta_info *sta)
{ {
struct mesh_table *tbl; struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath; struct mesh_path *mpath;
struct mpath_node *node; struct rhashtable_iter iter;
struct ieee80211_sub_if_data *sdata = sta->sdata; int ret;
int i;
rcu_read_lock(); ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
tbl = rcu_dereference(sdata->u.mesh.mesh_paths); if (ret)
for_each_mesh_entry(tbl, node, i) { return;
mpath = node->mpath;
ret = rhashtable_walk_start(&iter);
if (ret && ret != -EAGAIN)
goto out;
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
if (IS_ERR(mpath))
break;
if (rcu_access_pointer(mpath->next_hop) == sta && if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE && mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) { !(mpath->flags & MESH_PATH_FIXED)) {
...@@ -751,30 +546,20 @@ void mesh_plink_broken(struct sta_info *sta) ...@@ -751,30 +546,20 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
} }
} }
rcu_read_unlock(); out:
} rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
del_timer_sync(&node->mpath->timer);
kfree(node->mpath);
kfree(node);
} }
/* needs to be called with the corresponding hashwlock taken */ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{ {
struct mesh_path *mpath = node->mpath; struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
spin_lock_bh(&mpath->state_lock); spin_lock_bh(&mpath->state_lock);
mpath->flags |= MESH_PATH_RESOLVING; mpath->flags |= MESH_PATH_RESOLVING;
if (mpath->is_gate)
mesh_gate_del(tbl, mpath); mesh_gate_del(tbl, mpath);
hlist_del_rcu(&node->list); call_rcu(&mpath->rcu, mesh_path_reclaim);
call_rcu(&node->rcu, mesh_path_node_reclaim);
spin_unlock_bh(&mpath->state_lock); spin_unlock_bh(&mpath->state_lock);
atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&sdata->u.mesh.mpaths);
atomic_dec(&tbl->entries); atomic_dec(&tbl->entries);
...@@ -794,63 +579,87 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) ...@@ -794,63 +579,87 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
void mesh_path_flush_by_nexthop(struct sta_info *sta) void mesh_path_flush_by_nexthop(struct sta_info *sta)
{ {
struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl; struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_path *mpath; struct mesh_path *mpath;
struct mpath_node *node; struct rhashtable_iter iter;
int i; int ret;
rcu_read_lock(); ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); if (ret)
tbl = resize_dereference_mesh_paths(sdata); return;
for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath; ret = rhashtable_walk_start(&iter);
if (rcu_access_pointer(mpath->next_hop) == sta) { if (ret && ret != -EAGAIN)
spin_lock(&tbl->hashwlock[i]); goto out;
__mesh_path_del(tbl, node);
spin_unlock(&tbl->hashwlock[i]); while ((mpath = rhashtable_walk_next(&iter))) {
} if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
if (IS_ERR(mpath))
break;
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
} }
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); out:
rcu_read_unlock(); rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
} }
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy) const u8 *proxy)
{ {
struct mesh_table *tbl; struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_path *mpp; struct mesh_path *mpath;
struct mpath_node *node; struct rhashtable_iter iter;
int i; int ret;
rcu_read_lock(); ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); if (ret)
tbl = resize_dereference_mpp_paths(sdata); return;
for_each_mesh_entry(tbl, node, i) {
mpp = node->mpath; ret = rhashtable_walk_start(&iter);
if (ether_addr_equal(mpp->mpp, proxy)) { if (ret && ret != -EAGAIN)
spin_lock(&tbl->hashwlock[i]); goto out;
__mesh_path_del(tbl, node);
spin_unlock(&tbl->hashwlock[i]); while ((mpath = rhashtable_walk_next(&iter))) {
} if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
if (IS_ERR(mpath))
break;
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
} }
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); out:
rcu_read_unlock(); rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
} }
static void table_flush_by_iface(struct mesh_table *tbl, static void table_flush_by_iface(struct mesh_table *tbl)
struct ieee80211_sub_if_data *sdata)
{ {
struct mesh_path *mpath; struct mesh_path *mpath;
struct mpath_node *node; struct rhashtable_iter iter;
int i; int ret;
WARN_ON(!rcu_read_lock_held()); ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
for_each_mesh_entry(tbl, node, i) { if (ret)
mpath = node->mpath; return;
spin_lock_bh(&tbl->hashwlock[i]);
__mesh_path_del(tbl, node); ret = rhashtable_walk_start(&iter);
spin_unlock_bh(&tbl->hashwlock[i]); if (ret && ret != -EAGAIN)
goto out;
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
if (IS_ERR(mpath))
break;
__mesh_path_del(tbl, mpath);
} }
out:
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
} }
/** /**
...@@ -863,16 +672,8 @@ static void table_flush_by_iface(struct mesh_table *tbl, ...@@ -863,16 +672,8 @@ static void table_flush_by_iface(struct mesh_table *tbl,
*/ */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{ {
struct mesh_table *tbl; table_flush_by_iface(sdata->u.mesh.mesh_paths);
table_flush_by_iface(sdata->u.mesh.mpp_paths);
rcu_read_lock();
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
tbl = resize_dereference_mesh_paths(sdata);
table_flush_by_iface(tbl, sdata);
tbl = resize_dereference_mpp_paths(sdata);
table_flush_by_iface(tbl, sdata);
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
rcu_read_unlock();
} }
/** /**
...@@ -884,36 +685,25 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) ...@@ -884,36 +685,25 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
* *
* Returns: 0 if successful * Returns: 0 if successful
*/ */
static int table_path_del(struct mesh_table __rcu *rcu_tbl, static int table_path_del(struct mesh_table *tbl,
struct ieee80211_sub_if_data *sdata, struct ieee80211_sub_if_data *sdata,
const u8 *addr) const u8 *addr)
{ {
struct mesh_table *tbl;
struct mesh_path *mpath; struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_head *bucket; rcu_read_lock();
int hash_idx; mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
int err = 0; if (!mpath) {
rcu_read_unlock();
tbl = resize_dereference_paths(sdata, rcu_tbl); return -ENXIO;
hash_idx = mesh_table_hash(addr, tbl);
bucket = &tbl->hash_buckets[hash_idx];
spin_lock(&tbl->hashwlock[hash_idx]);
hlist_for_each_entry(node, bucket, list) {
mpath = node->mpath;
if (ether_addr_equal(addr, mpath->dst)) {
__mesh_path_del(tbl, node);
goto enddel;
}
} }
err = -ENXIO; __mesh_path_del(tbl, mpath);
enddel: rcu_read_unlock();
spin_unlock(&tbl->hashwlock[hash_idx]); return 0;
return err;
} }
/** /**
* mesh_path_del - delete a mesh path from the table * mesh_path_del - delete a mesh path from the table
* *
...@@ -924,36 +714,13 @@ static int table_path_del(struct mesh_table __rcu *rcu_tbl, ...@@ -924,36 +714,13 @@ static int table_path_del(struct mesh_table __rcu *rcu_tbl,
*/ */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{ {
int err = 0; int err;
/* flush relevant mpp entries first */ /* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr); mpp_flush_by_proxy(sdata, addr);
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++; sdata->u.mesh.mesh_paths_generation++;
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
return err;
}
/**
* mpp_path_del - delete a mesh proxy path from the table
*
* @addr: addr address (ETH_ALEN length)
* @sdata: local subif
*
* Returns: 0 if successful
*/
static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
int err = 0;
read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
err = table_path_del(sdata->u.mesh.mpp_paths, sdata, addr);
sdata->u.mesh.mpp_paths_generation++;
read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
return err; return err;
} }
...@@ -987,18 +754,17 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) ...@@ -987,18 +754,17 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct ieee80211_sub_if_data *sdata = mpath->sdata; struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct mesh_table *tbl; struct mesh_table *tbl;
struct mesh_path *from_mpath = mpath; struct mesh_path *from_mpath = mpath;
struct mesh_path *gate = NULL; struct mesh_path *gate;
bool copy = false; bool copy = false;
struct hlist_head *known_gates; struct hlist_head *known_gates;
rcu_read_lock(); tbl = sdata->u.mesh.mesh_paths;
tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
known_gates = tbl->known_gates; known_gates = tbl->known_gates;
rcu_read_unlock();
if (!known_gates) if (!known_gates)
return -EHOSTUNREACH; return -EHOSTUNREACH;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, known_gates, gate_list) { hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
if (gate->flags & MESH_PATH_ACTIVE) { if (gate->flags & MESH_PATH_ACTIVE) {
mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst); mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
...@@ -1016,6 +782,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) ...@@ -1016,6 +782,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
mpath_dbg(sdata, "Sending to %pM\n", gate->dst); mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
mesh_path_tx_pending(gate); mesh_path_tx_pending(gate);
} }
rcu_read_unlock();
return (from_mpath == mpath) ? -EHOSTUNREACH : 0; return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
} }
...@@ -1072,118 +839,73 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) ...@@ -1072,118 +839,73 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath); mesh_path_tx_pending(mpath);
} }
/* Free one bucket node of the legacy hand-rolled hash table.
 * NOTE(review): this helper is obsoleted by the rhashtable conversion
 * and is removed by this commit; kept here only as the pre-conversion
 * code being deleted.
 *
 * @p: hlist node embedded in a struct mpath_node
 * @free_leafs: if true, also free the mesh_path the node points to
 *		(after stopping its timer); otherwise only the node
 *		wrapper is freed (used when paths move to a new table).
 */
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
struct mesh_path *mpath;
struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
/* unlink from the bucket; readers may still hold RCU references */
hlist_del_rcu(p);
if (free_leafs) {
del_timer_sync(&mpath->timer);
kfree(mpath);
}
kfree(node);
}
/* Copy one node into a freshly allocated bucket entry of @newtbl,
 * rehashing by destination address. Used by the legacy table-resize
 * path; the mesh_path itself is shared, only the node wrapper is new.
 * NOTE(review): obsoleted by the rhashtable conversion and removed by
 * this commit.
 *
 * Returns 0 on success, -ENOMEM if the wrapper allocation fails.
 */
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
struct mesh_path *mpath;
struct mpath_node *node, *new_node;
u32 hash_idx;
/* GFP_ATOMIC: may run under the table spinlocks */
new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (new_node == NULL)
return -ENOMEM;
node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
new_node->mpath = mpath;
hash_idx = mesh_table_hash(mpath->dst, newtbl);
hlist_add_head(&new_node->list,
&newtbl->hash_buckets[hash_idx]);
return 0;
}
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{ {
struct mesh_table *tbl_path, *tbl_mpp; struct mesh_table *tbl_path, *tbl_mpp;
int ret; int ret;
tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); tbl_path = mesh_table_alloc();
if (!tbl_path) if (!tbl_path)
return -ENOMEM; return -ENOMEM;
tbl_path->free_node = &mesh_path_node_free;
tbl_path->copy_node = &mesh_path_node_copy;
tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
if (!tbl_path->known_gates) {
ret = -ENOMEM;
goto free_path;
}
INIT_HLIST_HEAD(tbl_path->known_gates);
tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); tbl_mpp = mesh_table_alloc();
if (!tbl_mpp) { if (!tbl_mpp) {
ret = -ENOMEM; ret = -ENOMEM;
goto free_path; goto free_path;
} }
tbl_mpp->free_node = &mesh_path_node_free;
tbl_mpp->copy_node = &mesh_path_node_copy;
tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
if (!tbl_mpp->known_gates) {
ret = -ENOMEM;
goto free_mpp;
}
INIT_HLIST_HEAD(tbl_mpp->known_gates);
rwlock_init(&sdata->u.mesh.pathtbl_resize_lock); rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
/* Need no locking since this is during init */ sdata->u.mesh.mesh_paths = tbl_path;
RCU_INIT_POINTER(sdata->u.mesh.mesh_paths, tbl_path); sdata->u.mesh.mpp_paths = tbl_mpp;
RCU_INIT_POINTER(sdata->u.mesh.mpp_paths, tbl_mpp);
return 0; return 0;
free_mpp:
mesh_table_free(tbl_mpp, true);
free_path: free_path:
mesh_table_free(tbl_path, true); mesh_table_free(tbl_path);
return ret; return ret;
} }
void mesh_path_expire(struct ieee80211_sub_if_data *sdata) static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{ {
struct mesh_table *tbl;
struct mesh_path *mpath; struct mesh_path *mpath;
struct mpath_node *node; struct rhashtable_iter iter;
int i; int ret;
rcu_read_lock(); ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
tbl = rcu_dereference(sdata->u.mesh.mesh_paths); if (ret)
for_each_mesh_entry(tbl, node, i) { return;
mpath = node->mpath;
ret = rhashtable_walk_start(&iter);
if (ret && ret != -EAGAIN)
goto out;
while ((mpath = rhashtable_walk_next(&iter))) {
if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
continue;
if (IS_ERR(mpath))
break;
if ((!(mpath->flags & MESH_PATH_RESOLVING)) && if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) && (!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mesh_path_del(sdata, mpath->dst); __mesh_path_del(tbl, mpath);
}
tbl = rcu_dereference(sdata->u.mesh.mpp_paths);
for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath;
if ((!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mpp_path_del(sdata, mpath->dst);
} }
out:
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
}
/* Expire stale entries in both per-interface tables (mesh and MPP). */
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{ {
/* no need for locking during exit path */ mesh_table_free(sdata->u.mesh.mesh_paths);
mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mesh_paths, 1), mesh_table_free(sdata->u.mesh.mpp_paths);
true);
mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mpp_paths, 1),
true);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment