Commit db304854 authored by Ying Xue, committed by David S. Miller

rhashtable: introduce rhashtable_lookup_insert routine

Introduce a new function, rhashtable_lookup_insert(), which performs the
lookup and the insertion atomically under bucket lock protection. This
spares callers from introducing an extra lock of their own whenever they
need to search for an object and insert it into the hash table as a
single operation.
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 54c5b7d3
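To make the motivation concrete, here is a caller-side sketch contrasting the old two-call pattern with the new API. Everything named my_* (the struct, the extra lock, the helper functions) is hypothetical and not part of this commit; the sketch only assumes a table initialized with rhashtable_init() using the fixed-key parameters (key_len, key_offset, head_offset) that rhashtable_lookup_insert() requires:

```c
#include <linux/rhashtable.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

/* Hypothetical object kept in the table, keyed by a u32 id. */
struct my_entry {
	u32 id;			/* hash key (key_offset/key_len point here) */
	struct rhash_head node;	/* linkage inside the hash table */
};

static DEFINE_SPINLOCK(my_lock);	/* only the old pattern needs this */

/* Old pattern: lookup and insert are separate critical sections, so an
 * extra caller-side lock is required to keep the pair atomic. */
static int my_add_old(struct rhashtable *ht, struct my_entry *e)
{
	int err = 0;

	spin_lock(&my_lock);
	if (rhashtable_lookup(ht, &e->id))
		err = -EEXIST;			/* duplicate key */
	else
		rhashtable_insert(ht, &e->node);
	spin_unlock(&my_lock);
	return err;
}

/* New pattern: one call; the bucket lock(s) make the search and the
 * insertion atomic, and the bool return reports duplicates. Safe to
 * call from atomic context. */
static int my_add_new(struct rhashtable *ht, struct my_entry *e)
{
	return rhashtable_lookup_insert(ht, &e->node) ? 0 : -EEXIST;
}
```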
include/linux/rhashtable.h
@@ -168,6 +168,7 @@ int rhashtable_shrink(struct rhashtable *ht);
 
 void *rhashtable_lookup(struct rhashtable *ht, const void *key);
 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 				bool (*compare)(void *, void *), void *arg);
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
 
 void rhashtable_destroy(struct rhashtable *ht);
lib/rhashtable.c
@@ -505,8 +505,26 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 		schedule_delayed_work(&ht->run_work, 0);
 }
 
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+				struct bucket_table *tbl, u32 hash)
+{
+	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
+							 tbl, hash);
+
+	if (rht_is_a_nulls(head))
+		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+	else
+		RCU_INIT_POINTER(obj->next, head);
+
+	rcu_assign_pointer(tbl->buckets[hash], obj);
+
+	atomic_inc(&ht->nelems);
+
+	rhashtable_wakeup_worker(ht);
+}
+
 /**
- * rhashtable_insert - insert object into hash hash table
+ * rhashtable_insert - insert object into hash table
  * @ht: hash table
  * @obj: pointer to hash head inside object
  *
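An aside on the helper introduced in the hunk above: __rhashtable_insert() assumes the caller already holds the bucket lock for hash in tbl, which is what lets rhashtable_insert() and the new rhashtable_lookup_insert() share one chain-linking routine. The rht_is_a_nulls() test it starts with distinguishes an empty bucket from a real chain head; a simplified sketch of that encoding (illustrative only, not the kernel's macros):

```c
/* Buckets end in a "nulls" marker rather than NULL: an odd value that
 * can never be a real pointer, since struct rhash_head is word-aligned.
 * This mirrors the check rht_is_a_nulls() performs; the kernel's
 * INIT_RHT_NULLS_HEAD() additionally encodes the hash in the marker so
 * RCU readers can detect having been moved to a different chain by a
 * concurrent resize. */
static inline bool sketch_is_a_nulls(const struct rhash_head *ptr)
{
	return (unsigned long)ptr & 1;
}
```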
@@ -523,7 +541,6 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl;
-	struct rhash_head *head;
 	spinlock_t *lock;
 	unsigned hash;
@@ -534,19 +551,9 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	lock = bucket_lock(tbl, hash);
 
 	spin_lock_bh(lock);
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-	else
-		RCU_INIT_POINTER(obj->next, head);
-	rcu_assign_pointer(tbl->buckets[hash], obj);
+	__rhashtable_insert(ht, obj, tbl, hash);
 	spin_unlock_bh(lock);
 
-	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
-
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
@@ -560,7 +567,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
  * walk the bucket chain upon removal. The removal operation is thus
  * considerable slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_expand() if the
  * shrink_decision function specified at rhashtable_init() returns true.
  *
  * The caller must ensure that no concurrent table mutations occur. It is
@@ -641,7 +648,7 @@ static bool rhashtable_compare(void *ptr, void *arg)
  * for a entry with an identical key. The first matching entry is returned.
  *
  * This lookup function may only be used for fixed key hash table (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
  *
  * Lookups may occur in parallel with hashtable mutations and resizing.
  */
@@ -702,6 +709,66 @@ void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+	struct bucket_table *new_tbl, *old_tbl;
+	spinlock_t *new_bucket_lock, *old_bucket_lock;
+	u32 new_hash, old_hash;
+	bool success = true;
+
+	BUG_ON(!ht->p.key_len);
+
+	rcu_read_lock();
+	old_tbl = rht_dereference_rcu(ht->tbl, ht);
+	old_hash = head_hashfn(ht, old_tbl, obj);
+	old_bucket_lock = bucket_lock(old_tbl, old_hash);
+	spin_lock_bh(old_bucket_lock);
+
+	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	new_hash = head_hashfn(ht, new_tbl, obj);
+	new_bucket_lock = bucket_lock(new_tbl, new_hash);
+	if (unlikely(old_tbl != new_tbl))
+		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
+		success = false;
+		goto exit;
+	}
+
+	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+	if (unlikely(old_tbl != new_tbl))
+		spin_unlock_bh(new_bucket_lock);
+	spin_unlock_bh(old_bucket_lock);
+
+	rcu_read_unlock();
+
+	return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
 
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
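Finally, a condensed view of the locking scheme that the new function's doc comment describes, written out as a comment-only sketch (the steps paraphrase the code in the hunk above; nothing here is additional kernel code):

```c
/*
 * rhashtable_lookup_insert() while a resize is in flight
 * (ht->tbl != ht->future_tbl):
 *
 *   1. lock old_tbl bucket          - removals from the old chain are
 *                                     blocked
 *   2. lock new_tbl bucket, nested  - inserts into the new chain are
 *                                     blocked; RHT_LOCK_NESTED tells
 *                                     lockdep that taking a second lock
 *                                     of the same class is intentional
 *   3. rhashtable_lookup()          - walks both tables, so a duplicate
 *                                     hiding in either chain is found
 *   4. __rhashtable_insert()        - new entries always go into the
 *                                     newest table (new_tbl/new_hash)
 *   5. unlock in reverse order
 *
 * With no resize pending, old_tbl == new_tbl and only one bucket lock
 * is taken, so the common case costs a single spinlock.
 */
```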