Commit c0eebfa3 authored by David S. Miller

Merge branch 'rhashtable'

Daniel Borkmann says:

====================
rhashtable updates

As discussed, I'm sending out rhashtable fixups for -net.

I have a couple more patches from last week still pending, i.e. to get
rid of the ht->nelems and ht->shift atomic operations, which speeds up
pure insertions/deletions: e.g. on my laptop, with 2 threads inserting
7M entries each, insertion time drops from ~1,450 ms to ~865 ms
(performance should be even better after removing the grow/shrink
indirections). That, however, is rather something for net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0d79a493 4c4b52d9
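For reference, the series below folds the default resize heuristics
(rht_grow_above_75 / rht_shrink_below_30) into the rhashtable core and
drops the per-table grow/shrink callbacks. The watermark arithmetic those
helpers apply can be sketched standalone like this (plain integers stand
in for the kernel's atomic counters; the helper names are illustrative,
not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Expand when load exceeds 75%: nelems > size * 3/4. */
static bool grow_above_75(size_t nelems, size_t size)
{
	return nelems > size / 4 * 3;
}

/* Shrink when load drops below 30%: nelems < size * 3/10. */
static bool shrink_below_30(size_t nelems, size_t size)
{
	return nelems < size * 3 / 10;
}

int main(void)
{
	/* A 1024-bucket table grows above 768 entries and shrinks
	 * below 307 entries. */
	printf("769/1024 grows: %d\n", grow_above_75(769, 1024));     /* 1 */
	printf("306/1024 shrinks: %d\n", shrink_below_30(306, 1024)); /* 1 */
	return 0;
}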
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -79,12 +79,6 @@ struct rhashtable;
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
- * @grow_decision: If defined, may return true if table should expand
- * @shrink_decision: If defined, may return true if table should shrink
- *
- * Note: when implementing the grow and shrink decision function, min/max
- * shift must be enforced, otherwise, resizing watermarks they set may be
- * useless.
  */
 struct rhashtable_params {
 	size_t			nelem_hint;
@@ -98,10 +92,6 @@ struct rhashtable_params {
 	size_t			locks_mul;
 	rht_hashfn_t		hashfn;
 	rht_obj_hashfn_t	obj_hashfn;
-	bool			(*grow_decision)(const struct rhashtable *ht,
-						 size_t new_size);
-	bool			(*shrink_decision)(const struct rhashtable *ht,
-						 size_t new_size);
 };
 
 /**
@@ -193,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -247,26 +247,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
 	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
 			 struct bucket_table *old_tbl, unsigned int hash)
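Aside from making the helpers static, the hunk above also fixes the
max_shift guard in rht_grow_above_75: the old condition
(ht->p.max_shift && shift < max_shift) was always false when max_shift
was left at 0, so such tables could never grow; the new form treats 0 as
"no upper bound". A minimal standalone illustration (function names
hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static bool old_guard(size_t shift, size_t max_shift)
{
	return max_shift && shift < max_shift;  /* false whenever max_shift == 0 */
}

static bool new_guard(size_t shift, size_t max_shift)
{
	return !max_shift || shift < max_shift; /* 0 now means "unlimited" */
}

int main(void)
{
	assert(!old_guard(4, 0));                     /* unset limit used to block growth */
	assert(new_guard(4, 0));                      /* growth is now allowed */
	assert(old_guard(4, 16) == new_guard(4, 16)); /* explicit limits behave the same */
	return 0;
}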
@@ -528,40 +526,19 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl->size))
 		rhashtable_expand(ht);
-	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl->size))
 		rhashtable_shrink(ht);
-
 unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_probe_expand(struct rhashtable *ht)
-{
-	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl && ht->p.grow_decision &&
-	    ht->p.grow_decision(ht, tbl->size))
-		schedule_work(&ht->run_work);
-}
-
-static void rhashtable_probe_shrink(struct rhashtable *ht)
-{
-	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl && ht->p.shrink_decision &&
-	    ht->p.shrink_decision(ht, tbl->size))
-		schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				struct bucket_table *tbl, u32 hash)
+				struct bucket_table *tbl,
+				const struct bucket_table *old_tbl, u32 hash)
 {
+	bool no_resize_running = tbl == old_tbl;
 	struct rhash_head *head;
 
 	hash = rht_bucket_index(tbl, hash);
@@ -577,8 +554,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-
-	rhashtable_probe_expand(ht);
+	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+		schedule_work(&ht->run_work);
 }
 
 /**
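With the probe helpers gone, the insert path above detects an in-flight
resize by comparing the current and future table pointers and only
schedules the deferred worker when they are equal; the worker then
re-checks the watermarks under ht->mutex before expanding or shrinking.
A simplified userspace analogue of that scheduling decision (types and
names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct table { unsigned int size; };

struct ht {
	struct table *tbl;        /* table readers currently use */
	struct table *future_tbl; /* equals tbl unless a resize is in flight */
	bool work_scheduled;      /* stands in for schedule_work(&ht->run_work) */
};

static void insert_one(struct ht *ht, unsigned int nelems)
{
	bool no_resize_running = ht->tbl == ht->future_tbl;

	/* Mirror of the patched insert path: only queue the resize
	 * worker when no resize is already in progress. */
	if (no_resize_running && nelems > ht->tbl->size / 4 * 3)
		ht->work_scheduled = true;
}

int main(void)
{
	struct table cur = { .size = 8 };
	struct ht ht = { .tbl = &cur, .future_tbl = &cur };

	insert_one(&ht, 7); /* 7 > 6, no resize running: schedules */
	printf("scheduled: %d\n", ht.work_scheduled); /* 1 */
	return 0;
}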
@@ -608,7 +585,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(tbl, old_tbl, hash);
-	__rhashtable_insert(ht, obj, tbl, hash);
+	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
 	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
@@ -690,8 +667,11 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
+		bool no_resize_running = new_tbl == old_tbl;
+
 		atomic_dec(&ht->nelems);
-		rhashtable_probe_shrink(ht);
+		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+			schedule_work(&ht->run_work);
 	}
 
 	rcu_read_unlock();
@@ -861,7 +841,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 		goto exit;
 	}
 
-	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -1123,7 +1103,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (!ht->p.hash_rnd)
 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		INIT_WORK(&ht->run_work, rht_deferred_worker);
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
 
 	return 0;
@@ -1142,7 +1121,6 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
 	ht->being_destroyed = true;
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		cancel_work_sync(&ht->run_work);
+	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -201,9 +201,8 @@ static int __init test_rht_init(void)
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = jhash,
+		.max_shift = 1, /* we expand/shrink manually here */
 		.nulls_base = (3U << RHT_BASE_SHIFT),
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 	int err;
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -192,8 +192,6 @@ static int nft_hash_init(const struct nft_set *set,
 		.key_offset = offsetof(struct nft_hash_elem, key),
 		.key_len = set->klen,
 		.hashfn = jhash,
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 
 	return rhashtable_init(priv, &params);
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3126,8 +3126,6 @@ static int __init netlink_proto_init(void)
 		.key_len = sizeof(u32), /* portid */
 		.hashfn = jhash,
 		.max_shift = 16, /* 64K */
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 
 	if (err != 0)
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2364,8 +2364,6 @@ int tipc_sk_rht_init(struct net *net)
 		.hashfn = jhash,
 		.max_shift = 20, /* 1M */
 		.min_shift = 8, /* 256 */
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 
 	return rhashtable_init(&tn->sk_rht, &rht_params);