Commit c0690016 authored by NeilBrown, committed by David S. Miller

rhashtable: clean up dereference of ->future_tbl.

Using rht_dereference_bucket() to dereference
->future_tbl looks like a type error, and could be confusing.
Using rht_dereference_rcu() to test a pointer for NULL
adds an unnecessary barrier; rcu_access_pointer() is preferred
for NULL tests when no lock is held.

This patch uses three different ways to access ->future_tbl,
illustrated by the sketch after this list:
- if we know the mutex is held, use rht_dereference();
- if we don't hold the mutex and are only testing for NULL,
  use rcu_access_pointer();
- otherwise (a true dereference under RCU protection),
  use rht_dereference_rcu().
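
A minimal sketch of the three patterns (the helper functions here
are hypothetical, invented only for illustration; the accessor
macros themselves are real rhashtable/RCU API):

#include <linux/rhashtable.h>

/* (1) Update side, ht->mutex held: protected dereference. */
static struct bucket_table *next_table_locked(struct rhashtable *ht,
					      struct bucket_table *tbl)
{
	return rht_dereference(tbl->future_tbl, ht);
}

/* (2) No lock held, NULL test only: no read barrier needed. */
static bool rehash_pending(struct bucket_table *tbl)
{
	return rcu_access_pointer(tbl->future_tbl) != NULL;
}

/* (3) Inside rcu_read_lock(), pointer will be followed. */
static struct bucket_table *next_table_rcu(struct rhashtable *ht,
					   struct bucket_table *tbl)
{
	return rht_dereference_rcu(tbl->future_tbl, ht);
}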

Note that this includes a simplification of the call to
rhashtable_last_table(): we no longer do an extra dereference of
->future_tbl before the call, as sketched below.
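
For reference, rhashtable_last_table() already walks the entire
->future_tbl chain itself, roughly as follows (paraphrased from
lib/rhashtable.c; ht->mutex is held by all callers), so starting
the walk from old_tbl gives the same result:

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}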
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0ad66449
@@ -595,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
 	lock = rht_bucket_lock(tbl, hash);
 	spin_lock_bh(lock);
 
-	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
 		spin_unlock_bh(lock);
 		rcu_read_unlock();
...
...@@ -226,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, ...@@ -226,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{ {
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
struct bucket_table *new_tbl = rhashtable_last_table(ht, struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
rht_dereference_rcu(old_tbl->future_tbl, ht));
struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
int err = -EAGAIN; int err = -EAGAIN;
struct rhash_head *head, *next, *entry; struct rhash_head *head, *next, *entry;
@@ -467,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 
 fail:
 	/* Do not fail the insert if someone else did a rehash. */
-	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+	if (likely(rcu_access_pointer(tbl->future_tbl)))
 		return 0;
 
 	/* Schedule async rehash to retry allocation in process context. */
@@ -540,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 		if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
 			return ERR_CAST(data);
 
-		new_tbl = rcu_dereference(tbl->future_tbl);
+		new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 		if (new_tbl)
 			return new_tbl;
@@ -599,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			break;
 		spin_unlock_bh(lock);
-		tbl = rcu_dereference(tbl->future_tbl);
+		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	}
 
 	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
...