Commit b824478b authored by Herbert Xu, committed by David S. Miller

rhashtable: Add multiple rehash support

This patch adds the missing bits to allow multiple rehashes.  The
read side as well as removal already handle this correctly, so only
the rehasher and insertion need to be modified to handle it.

Note that this patch doesn't actually enable multiple rehashes, so
for now rehashing is still only performed by the worker thread.

This patch also disables the explicit expand/shrink interface because
the table is meant to expand and shrink automatically, and continuing
to export these interfaces unnecessarily complicates the life of the
rehasher since the rehash process is now composed of two parts.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 18093d1c
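
Background for the insertion-side change in the first hunks below: every bucket table in the chain carries a rehash counter recording how many of its buckets have already been moved into its future_tbl, so an inserter must walk forward through the chain until it holds the bucket lock of the oldest table whose target bucket has not been rehashed yet. The following is a minimal userspace sketch of that walk under simplified assumptions: toy_bucket_table and lock_oldest_unrehashed are illustrative stand-ins (not the kernel API), pthread mutexes stand in for the per-bucket spinlocks, and a simple mask stands in for rht_head_hashfn().

/* Minimal sketch of the "lock the oldest un-rehashed bucket" walk.
 * Types, names and locking here are simplified stand-ins, not kernel code.
 */
#include <pthread.h>

struct toy_bucket_table {
        unsigned int size;                    /* bucket count, power of two */
        unsigned int rehash;                  /* buckets already moved out */
        struct toy_bucket_table *future_tbl;  /* next table, NULL if none */
        pthread_mutex_t *locks;               /* one lock per bucket (simplified) */
};

/* Returns the table whose bucket for @key_hash is now locked; the caller
 * inserts there and later unlocks locks[key_hash & (size - 1)].
 */
static struct toy_bucket_table *
lock_oldest_unrehashed(struct toy_bucket_table *tbl, unsigned int key_hash)
{
        for (;;) {
                unsigned int hash = key_hash & (tbl->size - 1);

                pthread_mutex_lock(&tbl->locks[hash]);

                /* Bucket not yet rehashed: this table still owns it. */
                if (tbl->rehash <= hash)
                        return tbl;

                /* Bucket already migrated; drop the lock and retry
                 * against the next table in the chain.
                 */
                pthread_mutex_unlock(&tbl->locks[hash]);
                tbl = tbl->future_tbl;
        }
}

In the patch itself this is the for (;;) loop added to __rhashtable_insert_fast(); the read and remove paths already cope with a chain of tables, which is why only insertion and the rehasher needed changes.
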
@@ -308,9 +308,6 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                            struct rhash_head *obj,
                            struct bucket_table *old_tbl);
 
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
-
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
@@ -541,17 +538,22 @@ static inline int __rhashtable_insert_fast(
         rcu_read_lock();
 
         tbl = rht_dereference_rcu(ht->tbl, ht);
-        hash = rht_head_hashfn(ht, tbl, obj, params);
-        lock = rht_bucket_lock(tbl, hash);
 
-        spin_lock_bh(lock);
-
-        /* Because we have already taken the bucket lock in tbl,
-         * if we find that future_tbl is not yet visible then
-         * that guarantees all other insertions of the same entry
-         * will also grab the bucket lock in tbl because until
-         * the rehash completes ht->tbl won't be changed.
+        /* All insertions must grab the oldest table containing
+         * the hashed bucket that is yet to be rehashed.
          */
+        for (;;) {
+                hash = rht_head_hashfn(ht, tbl, obj, params);
+                lock = rht_bucket_lock(tbl, hash);
+                spin_lock_bh(lock);
+
+                if (tbl->rehash <= hash)
+                        break;
+
+                spin_unlock_bh(lock);
+                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+        }
+
         new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
         if (unlikely(new_tbl)) {
                 err = rhashtable_insert_slow(ht, key, obj, new_tbl);
...
@@ -136,11 +136,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
         return tbl;
 }
 
+static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
+                                                  struct bucket_table *tbl)
+{
+        struct bucket_table *new_tbl;
+
+        do {
+                new_tbl = tbl;
+                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+        } while (tbl);
+
+        return new_tbl;
+}
+
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 {
         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-        struct bucket_table *new_tbl =
-                rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
+        struct bucket_table *new_tbl = rhashtable_last_table(ht,
+                rht_dereference_rcu(old_tbl->future_tbl, ht));
         struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
         int err = -ENOENT;
         struct rhash_head *head, *next, *entry;
@@ -196,12 +209,18 @@ static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
         spin_unlock_bh(old_bucket_lock);
 }
 
-static void rhashtable_rehash(struct rhashtable *ht,
-                              struct bucket_table *new_tbl)
+static int rhashtable_rehash_attach(struct rhashtable *ht,
+                                    struct bucket_table *old_tbl,
+                                    struct bucket_table *new_tbl)
 {
-        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-        struct rhashtable_walker *walker;
-        unsigned old_hash;
+        /* Protect future_tbl using the first bucket lock. */
+        spin_lock_bh(old_tbl->locks);
+
+        /* Did somebody beat us to it? */
+        if (rcu_access_pointer(old_tbl->future_tbl)) {
+                spin_unlock_bh(old_tbl->locks);
+                return -EEXIST;
+        }
 
         /* Make insertions go into the new, empty table right away. Deletions
          * and lookups will be attempted in both tables until we synchronize.
@@ -211,6 +230,22 @@ static void rhashtable_rehash(struct rhashtable *ht,
         /* Ensure the new table is visible to readers. */
         smp_wmb();
 
+        spin_unlock_bh(old_tbl->locks);
+
+        return 0;
+}
+
+static int rhashtable_rehash_table(struct rhashtable *ht)
+{
+        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+        struct bucket_table *new_tbl;
+        struct rhashtable_walker *walker;
+        unsigned old_hash;
+
+        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
+        if (!new_tbl)
+                return 0;
+
         for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
                 rhashtable_rehash_chain(ht, old_hash);
 
@@ -225,6 +260,8 @@ static void rhashtable_rehash(struct rhashtable *ht,
          * remain.
          */
         call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+
+        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
 
 /**
@@ -242,20 +279,25 @@ static void rhashtable_rehash(struct rhashtable *ht,
  * It is valid to have concurrent insertions and deletions protected by per
  * bucket locks or concurrent RCU protected lookups and traversals.
  */
-int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_expand(struct rhashtable *ht)
 {
         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+        int err;
 
         ASSERT_RHT_MUTEX(ht);
 
+        old_tbl = rhashtable_last_table(ht, old_tbl);
+
         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
         if (new_tbl == NULL)
                 return -ENOMEM;
 
-        rhashtable_rehash(ht, new_tbl);
-        return 0;
+        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+        if (err)
+                bucket_table_free(new_tbl);
+
+        return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_expand);
 
 /**
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
@@ -273,10 +315,11 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * It is valid to have concurrent insertions and deletions protected by per
  * bucket locks or concurrent RCU protected lookups and traversals.
  */
-int rhashtable_shrink(struct rhashtable *ht)
+static int rhashtable_shrink(struct rhashtable *ht)
 {
         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
         unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+        int err;
 
         ASSERT_RHT_MUTEX(ht);
 
@@ -286,19 +329,25 @@ int rhashtable_shrink(struct rhashtable *ht)
         if (old_tbl->size <= size)
                 return 0;
 
+        if (rht_dereference(old_tbl->future_tbl, ht))
+                return -EEXIST;
+
         new_tbl = bucket_table_alloc(ht, size);
         if (new_tbl == NULL)
                 return -ENOMEM;
 
-        rhashtable_rehash(ht, new_tbl);
-        return 0;
+        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+        if (err)
+                bucket_table_free(new_tbl);
+
+        return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_shrink);
 
 static void rht_deferred_worker(struct work_struct *work)
 {
         struct rhashtable *ht;
         struct bucket_table *tbl;
+        int err = 0;
 
         ht = container_of(work, struct rhashtable, run_work);
         mutex_lock(&ht->mutex);
@@ -306,13 +355,20 @@ static void rht_deferred_worker(struct work_struct *work)
                 goto unlock;
 
         tbl = rht_dereference(ht->tbl, ht);
+        tbl = rhashtable_last_table(ht, tbl);
 
         if (rht_grow_above_75(ht, tbl))
                 rhashtable_expand(ht);
         else if (rht_shrink_below_30(ht, tbl))
                 rhashtable_shrink(ht);
 
+        err = rhashtable_rehash_table(ht);
+
 unlock:
         mutex_unlock(&ht->mutex);
+
+        if (err)
+                schedule_work(&ht->run_work);
 }
 
 int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
@@ -323,6 +379,7 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
         unsigned hash;
         int err = -EEXIST;
 
+        tbl = rhashtable_last_table(ht, tbl);
         hash = head_hashfn(ht, tbl, obj);
         spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
...
@@ -155,30 +155,6 @@ static int __init test_rhashtable(struct rhashtable *ht)
                 test_rht_lookup(ht);
         rcu_read_unlock();
 
-        for (i = 0; i < TEST_NEXPANDS; i++) {
-                pr_info("  Table expansion iteration %u...\n", i);
-                mutex_lock(&ht->mutex);
-                rhashtable_expand(ht);
-                mutex_unlock(&ht->mutex);
-
-                rcu_read_lock();
-                pr_info("  Verifying lookups...\n");
-                test_rht_lookup(ht);
-                rcu_read_unlock();
-        }
-
-        for (i = 0; i < TEST_NEXPANDS; i++) {
-                pr_info("  Table shrinkage iteration %u...\n", i);
-                mutex_lock(&ht->mutex);
-                rhashtable_shrink(ht);
-                mutex_unlock(&ht->mutex);
-
-                rcu_read_lock();
-                pr_info("  Verifying lookups...\n");
-                test_rht_lookup(ht);
-                rcu_read_unlock();
-        }
-
         rcu_read_lock();
         test_bucket_stats(ht, true);
         rcu_read_unlock();
...
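
For reference, with the explicit expand/shrink entry points gone, all resizing is driven from the worker, and the rehash is now composed of two parts: attaching an empty future table (rhashtable_rehash_attach, reached through the now-static rhashtable_expand/rhashtable_shrink) and migrating the chains (rhashtable_rehash_table, which returns -EAGAIN while further tables are queued). Below is a hedged sketch of that control flow, pieced together from the hunks above; the helper names are the ones introduced by the patch, while the surrounding scaffolding (including the early bail-out during destruction) is approximate or elided.

/* Sketch of the worker-side flow after this patch, reassembled from
 * the hunks above; not a verbatim copy of rht_deferred_worker().
 */
static void deferred_worker_sketch(struct rhashtable *ht)
{
        struct bucket_table *tbl;
        int err;

        mutex_lock(&ht->mutex);

        /* Size decisions are made against the newest table in the chain. */
        tbl = rhashtable_last_table(ht, rht_dereference(ht->tbl, ht));

        /* Part 1: attach an empty future_tbl if the load factor asks for it.
         * rhashtable_expand()/rhashtable_shrink() are static now and only
         * called from here.
         */
        if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
        else if (rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);

        /* Part 2: migrate one table's worth of chains.  -EAGAIN means more
         * tables are still queued behind the one just rehashed.
         */
        err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        /* Keep rescheduling until the whole chain has been flattened. */
        if (err)
                schedule_work(&ht->run_work);
}

The removal of the exported rhashtable_expand()/rhashtable_shrink() declarations and of the expansion/shrinkage loops in test_rhashtable follows from this: callers only insert and remove entries, and the worker performs all resizing and rehashing on their behalf.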