Commit b65885d2 authored by David S. Miller

Merge branch 'rhashtable_remove_shift'

Herbert Xu says:

====================
rhashtable: Kill redundant shift parameter

I was trying to squeeze bucket_table->rehash in by downsizing
bucket_table->size, only to find that my spot had been taken
over by bucket_table->shift.  These patches kill shift and make
me feel better :)

v2 corrects the typo in the test_rhashtable changelog and also
notes the min_shift parameter in the tipc patch changelog.
====================
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a61bfa65 e2e21c1c
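
For callers the conversion is mechanical: every former shift-based bound maps to a size-based bound via size = 1 << shift, which is exactly how the constants in the hunks below were rewritten (max_shift = 16 becomes max_size = 65536, and so on). The following is a small standalone sketch of that mapping, not part of the patch; the variable names are illustrative only.

/* Illustration only: the removed shift-based limits correspond to the
 * new size-based limits as size == 1U << shift.
 */
#include <stdio.h>

int main(void)
{
	unsigned int netlink_max_shift = 16;	/* old .max_shift, "64K" */
	unsigned int tipc_max_shift    = 20;	/* old .max_shift, "1M"  */
	unsigned int tipc_min_shift    = 8;	/* old .min_shift, "256" */

	printf("netlink max_size = %u\n", 1U << netlink_max_shift);	/* 65536   */
	printf("tipc    max_size = %u\n", 1U << tipc_max_shift);	/* 1048576 */
	printf("tipc    min_size = %u\n", 1U << tipc_min_shift);	/* 256     */
	return 0;
}
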
@@ -51,7 +51,6 @@ struct rhash_head {
  * @size: Number of hash buckets
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
- * @shift: Current size (1 << shift)
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
@@ -63,7 +62,6 @@ struct bucket_table {
 	unsigned int		size;
 	unsigned int		rehash;
 	u32			hash_rnd;
-	u32			shift;
 	unsigned int		locks_mask;
 	spinlock_t		*locks;
 	struct list_head	walkers;
@@ -85,8 +83,8 @@ struct rhashtable;
  * @key_len: Length of key
  * @key_offset: Offset of key in struct to be hashed
  * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_shift: Maximum number of shifts while expanding
- * @min_shift: Minimum number of shifts while shrinking
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
  * @nulls_base: Base value to generate nulls marker
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
@@ -97,8 +95,8 @@ struct rhashtable_params {
 	size_t			key_len;
 	size_t			key_offset;
 	size_t			head_offset;
-	size_t			max_shift;
-	size_t			min_shift;
+	unsigned int		max_size;
+	unsigned int		min_size;
 	u32			nulls_base;
 	size_t			locks_mul;
 	rht_hashfn_t		hashfn;
...
@@ -27,7 +27,7 @@
 #include <linux/err.h>
 
 #define HASH_DEFAULT_SIZE	64UL
-#define HASH_MIN_SIZE		4UL
+#define HASH_MIN_SIZE		4U
 #define BUCKET_LOCKS_PER_CPU	128UL
 
 /* Base bits plus 1 bit for nulls marker */
@@ -162,7 +162,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	tbl->size = nbuckets;
-	tbl->shift = ilog2(nbuckets);
 
 	if (alloc_bucket_locks(ht, tbl) < 0) {
 		bucket_table_free(tbl);
@@ -189,7 +188,7 @@ static bool rht_grow_above_75(const struct rhashtable *ht,
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
-	       (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
+	       (!ht->p.max_size || tbl->size < ht->p.max_size);
 }
 
 /**
@@ -202,7 +201,7 @@ static bool rht_shrink_below_30(const struct rhashtable *ht,
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
-	       tbl->shift > ht->p.min_shift;
+	       tbl->size > ht->p.min_size;
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
@@ -874,7 +873,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-		   1UL << params->min_shift);
+		   (unsigned long)params->min_size);
 }
 
 /**
@@ -934,8 +933,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
 		return -EINVAL;
 
-	params->min_shift = max_t(size_t, params->min_shift,
-				  ilog2(HASH_MIN_SIZE));
+	params->min_size = max(params->min_size, HASH_MIN_SIZE);
 
 	if (params->nelem_hint)
 		size = rounded_hashtable_size(params);
...
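
A note on the sizing math in the rounded_hashtable_size() hunk above: the initial table size is nelem_hint * 4 / 3 rounded up to a power of two, clamped so it never drops below min_size. Below is a standalone sketch of that arithmetic with the kernel helper re-implemented locally; the nelem_hint value is hypothetical and only the min_size of 256 echoes a value used later in this series.

/* Sketch of the rounded_hashtable_size() arithmetic: round
 * nelem_hint * 4 / 3 up to a power of two, but never below min_size.
 * roundup_pow_of_two() is mimicked here for a userspace build.
 */
#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long nelem_hint = 600;	/* hypothetical element-count hint */
	unsigned int min_size = 256;	/* e.g. the tipc value */
	unsigned long size;

	size = roundup_pow_of_two_ul(nelem_hint * 4 / 3);	/* 800 -> 1024 */
	if (size < min_size)
		size = min_size;

	printf("initial table size = %lu\n", size);	/* 1024 */
	return 0;
}
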
@@ -201,7 +201,7 @@ static int __init test_rht_init(void)
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = jhash,
-		.max_shift = 1, /* we expand/shrink manually here */
+		.max_size = 2, /* we expand/shrink manually here */
 		.nulls_base = (3U << RHT_BASE_SHIFT),
 	};
 	int err;
...
@@ -3123,7 +3123,7 @@ static int __init netlink_proto_init(void)
 		.key_offset = offsetof(struct netlink_sock, portid),
 		.key_len = sizeof(u32), /* portid */
 		.hashfn = jhash,
-		.max_shift = 16, /* 64K */
+		.max_size = 65536,
 	};
 
 	if (err != 0)
...
@@ -2286,8 +2286,8 @@ int tipc_sk_rht_init(struct net *net)
 		.key_offset = offsetof(struct tipc_sock, portid),
 		.key_len = sizeof(u32), /* portid */
 		.hashfn = jhash,
-		.max_shift = 20, /* 1M */
-		.min_shift = 8,  /* 256 */
+		.max_size = 1048576,
+		.min_size = 256,
 	};
 
 	return rhashtable_init(&tn->sk_rht, &rht_params);
...