Commit bf3f14d6 authored by David S. Miller

rhashtable: Revert nested table changes.

This reverts commits:

6a254780
9dbbfb0a
40137906

It's too risky to put in this late in the release
cycle.  We'll put these changes into the next merge
window instead.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 75224c93
@@ -1420,32 +1420,26 @@ static struct shrinker glock_shrinker = {
  * @sdp: the filesystem
  * @bucket: the bucket
  *
- * Note that the function can be called multiple times on the same
- * object. So the user must ensure that the function can cope with
- * that.
  */
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
     struct gfs2_glock *gl;
-    struct rhashtable_iter iter;
+    struct rhash_head *pos;
+    const struct bucket_table *tbl;
+    int i;
-
-    rhashtable_walk_enter(&gl_hash_table, &iter);
-
-    do {
-        gl = ERR_PTR(rhashtable_walk_start(&iter));
-        if (gl)
-            continue;
-
-        while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
+    rcu_read_lock();
+    tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
+    for (i = 0; i < tbl->size; i++) {
+        rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
             if ((gl->gl_name.ln_sbd == sdp) &&
                 lockref_get_not_dead(&gl->gl_lockref))
                 examiner(gl);
-
-        rhashtable_walk_stop(&iter);
-    } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
-
-    rhashtable_walk_exit(&iter);
+        }
+    }
+    rcu_read_unlock();
+    cond_resched();
 }
 /**
...
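For readers comparing the two versions in the hunk above: the removed gfs2 code used the rhashtable walker API, which copes with concurrent resizes by reporting -EAGAIN and letting the caller restart, while the restored code walks the current bucket array directly under rcu_read_lock(). Below is a minimal sketch of that walker pattern, mirroring the removed call site; struct my_obj and my_walk_all() are illustrative names, not part of this commit.

#include <linux/err.h>
#include <linux/rhashtable.h>

struct my_obj {
    struct rhash_head node;    /* linkage owned by the rhashtable */
    u32 key;
};

/* Visit every object in @ht, restarting whenever a resize interrupts the walk. */
static void my_walk_all(struct rhashtable *ht, void (*visit)(struct my_obj *obj))
{
    struct rhashtable_iter iter;
    struct my_obj *obj;

    rhashtable_walk_enter(ht, &iter);
    do {
        obj = ERR_PTR(rhashtable_walk_start(&iter));
        if (obj)
            continue;              /* start returned -EAGAIN, retry */

        while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
            visit(obj);            /* runs under the walker's RCU read lock */

        rhashtable_walk_stop(&iter);
    } while (obj == ERR_PTR(-EAGAIN));
    rhashtable_walk_exit(&iter);
}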
@@ -61,7 +61,6 @@ struct rhlist_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
- * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
@@ -69,12 +68,10 @@ struct rhlist_head {
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
- * @ntbl: Nested table used when out of memory.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
     unsigned int size;
-    unsigned int nest;
     unsigned int rehash;
     u32 hash_rnd;
     unsigned int locks_mask;
@@ -377,12 +374,6 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                  void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                            unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                   struct bucket_table *tbl,
-                                                   unsigned int hash);
 #define rht_dereference(p, ht) \
     rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -398,27 +389,6 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
     ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-static inline struct rhash_head __rcu *const *rht_bucket(
-    const struct bucket_table *tbl, unsigned int hash)
-{
-    return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
-                                 &tbl->buckets[hash];
-}
-
-static inline struct rhash_head __rcu **rht_bucket_var(
-    struct bucket_table *tbl, unsigned int hash)
-{
-    return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
-                                 &tbl->buckets[hash];
-}
-
-static inline struct rhash_head __rcu **rht_bucket_insert(
-    struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
-{
-    return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
-                                 &tbl->buckets[hash];
-}
 /**
  * rht_for_each_continue - continue iterating over hash chain
  * @pos: the &struct rhash_head to use as a loop cursor.
@@ -438,7 +408,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash: the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-    rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+    rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
 /**
  * rht_for_each_entry_continue - continue iterating over hash chain
@@ -463,7 +433,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \
-    rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
+    rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
                                 tbl, hash, member)
 /**
@@ -479,7 +449,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * remove the loop cursor from the list.
  */
 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
-    for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+    for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
          next = !rht_is_a_nulls(pos) ? \
                 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
          (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
@@ -515,7 +485,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash) \
-    rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+    rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
 /**
  * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -549,7 +519,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
-    rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
+    rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash], \
                                     tbl, hash, member)
 /**
@@ -595,7 +565,7 @@ static inline struct rhash_head *__rhashtable_lookup(
         .ht = ht,
         .key = key,
     };
-    struct bucket_table *tbl;
+    const struct bucket_table *tbl;
     struct rhash_head *he;
     unsigned int hash;
@@ -727,12 +697,8 @@ static inline void *__rhashtable_insert_fast(
     }
     elasticity = ht->elasticity;
-    pprev = rht_bucket_insert(ht, tbl, hash);
-    data = ERR_PTR(-ENOMEM);
-    if (!pprev)
-        goto out;
-
-    rht_for_each_continue(head, *pprev, tbl, hash) {
+    pprev = &tbl->buckets[hash];
+    rht_for_each(head, tbl, hash) {
         struct rhlist_head *plist;
         struct rhlist_head *list;
@@ -770,7 +736,7 @@ static inline void *__rhashtable_insert_fast(
     if (unlikely(rht_grow_above_100(ht, tbl)))
         goto slow_path;
-    head = rht_dereference_bucket(*pprev, tbl, hash);
+    head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
     RCU_INIT_POINTER(obj->next, head);
     if (rhlist) {
@@ -780,7 +746,7 @@ static inline void *__rhashtable_insert_fast(
         RCU_INIT_POINTER(list->next, NULL);
     }
-    rcu_assign_pointer(*pprev, obj);
+    rcu_assign_pointer(tbl->buckets[hash], obj);
     atomic_inc(&ht->nelems);
     if (rht_grow_above_75(ht, tbl))
@@ -989,8 +955,8 @@ static inline int __rhashtable_remove_fast_one(
     spin_lock_bh(lock);
-    pprev = rht_bucket_var(tbl, hash);
-    rht_for_each_continue(he, *pprev, tbl, hash) {
+    pprev = &tbl->buckets[hash];
+    rht_for_each(he, tbl, hash) {
         struct rhlist_head *list;
         list = container_of(he, struct rhlist_head, rhead);
@@ -1141,8 +1107,8 @@ static inline int __rhashtable_replace_fast(
     spin_lock_bh(lock);
-    pprev = rht_bucket_var(tbl, hash);
-    rht_for_each_continue(he, *pprev, tbl, hash) {
+    pprev = &tbl->buckets[hash];
+    rht_for_each(he, tbl, hash) {
         if (he != obj_old) {
             pprev = &he->next;
             continue;
...
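The header hunks above put the iteration macros back to indexing (tbl)->buckets[hash] directly; with the reverted patch they went through rht_bucket(), which could dispatch to rht_bucket_nested() whenever tbl->nest was set. A small illustrative caller of the restored rht_for_each_entry_rcu(), in the same style as the gfs2 and tipc call sites in this commit; struct my_obj and my_find_in_bucket() are placeholder names, not from the patch.

#include <linux/rhashtable.h>

struct my_obj {
    struct rhash_head node;
    u32 key;
};

/* Scan one bucket chain; the caller must already hold rcu_read_lock(). */
static struct my_obj *my_find_in_bucket(const struct bucket_table *tbl,
                                        unsigned int hash, u32 key)
{
    struct my_obj *obj;
    struct rhash_head *pos;

    rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
        if (obj->key == key)
            return obj;
    }
    return NULL;
}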
@@ -32,11 +32,6 @@
 #define HASH_MIN_SIZE 4U
 #define BUCKET_LOCKS_PER_CPU 32UL
-union nested_table {
-    union nested_table __rcu *table;
-    struct rhash_head __rcu *bucket;
-};
 static u32 head_hashfn(struct rhashtable *ht,
                        const struct bucket_table *tbl,
                        const struct rhash_head *he)
@@ -81,9 +76,6 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
     /* Never allocate more than 0.5 locks per bucket */
     size = min_t(unsigned int, size, tbl->size >> 1);
-    if (tbl->nest)
-        size = min(size, 1U << tbl->nest);
     if (sizeof(spinlock_t) != 0) {
         tbl->locks = NULL;
 #ifdef CONFIG_NUMA
@@ -107,45 +99,8 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
     return 0;
 }
-static void nested_table_free(union nested_table *ntbl, unsigned int size)
-{
-    const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-    const unsigned int len = 1 << shift;
-    unsigned int i;
-
-    ntbl = rcu_dereference_raw(ntbl->table);
-    if (!ntbl)
-        return;
-
-    if (size > len) {
-        size >>= shift;
-        for (i = 0; i < len; i++)
-            nested_table_free(ntbl + i, size);
-    }
-
-    kfree(ntbl);
-}
-
-static void nested_bucket_table_free(const struct bucket_table *tbl)
-{
-    unsigned int size = tbl->size >> tbl->nest;
-    unsigned int len = 1 << tbl->nest;
-    union nested_table *ntbl;
-    unsigned int i;
-
-    ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
-
-    for (i = 0; i < len; i++)
-        nested_table_free(ntbl + i, size);
-
-    kfree(ntbl);
-}
-
 static void bucket_table_free(const struct bucket_table *tbl)
 {
-    if (tbl->nest)
-        nested_bucket_table_free(tbl);
-
     if (tbl)
         kvfree(tbl->locks);
@@ -157,59 +112,6 @@ static void bucket_table_free_rcu(struct rcu_head *head)
     bucket_table_free(container_of(head, struct bucket_table, rcu));
 }
-static union nested_table *nested_table_alloc(struct rhashtable *ht,
-                                              union nested_table __rcu **prev,
-                                              unsigned int shifted,
-                                              unsigned int nhash)
-{
-    union nested_table *ntbl;
-    int i;
-
-    ntbl = rcu_dereference(*prev);
-    if (ntbl)
-        return ntbl;
-
-    ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
-
-    if (ntbl && shifted) {
-        for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
-            INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
-                                (i << shifted) | nhash);
-    }
-
-    rcu_assign_pointer(*prev, ntbl);
-
-    return ntbl;
-}
-
-static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
-                                                      size_t nbuckets,
-                                                      gfp_t gfp)
-{
-    const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-    struct bucket_table *tbl;
-    size_t size;
-
-    if (nbuckets < (1 << (shift + 1)))
-        return NULL;
-
-    size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
-
-    tbl = kzalloc(size, gfp);
-    if (!tbl)
-        return NULL;
-
-    if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-                            0, 0)) {
-        kfree(tbl);
-        return NULL;
-    }
-
-    tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
-
-    return tbl;
-}
-
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                                size_t nbuckets,
                                                gfp_t gfp)
@@ -224,17 +126,10 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
     tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
     if (tbl == NULL && gfp == GFP_KERNEL)
         tbl = vzalloc(size);
-
-    size = nbuckets;
-
-    if (tbl == NULL && gfp != GFP_KERNEL) {
-        tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
-        nbuckets = 0;
-    }
-
     if (tbl == NULL)
         return NULL;
-    tbl->size = size;
+    tbl->size = nbuckets;
     if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
         bucket_table_free(tbl);
@@ -269,17 +164,12 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
     struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
     struct bucket_table *new_tbl = rhashtable_last_table(ht,
         rht_dereference_rcu(old_tbl->future_tbl, ht));
-    struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
-    int err = -EAGAIN;
+    struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
+    int err = -ENOENT;
     struct rhash_head *head, *next, *entry;
     spinlock_t *new_bucket_lock;
     unsigned int new_hash;
-
-    if (new_tbl->nest)
-        goto out;
-
-    err = -ENOENT;
-
     rht_for_each(entry, old_tbl, old_hash) {
         err = 0;
         next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -312,26 +202,19 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
     return err;
 }
-static int rhashtable_rehash_chain(struct rhashtable *ht,
-                                   unsigned int old_hash)
+static void rhashtable_rehash_chain(struct rhashtable *ht,
+                                    unsigned int old_hash)
 {
     struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
     spinlock_t *old_bucket_lock;
-    int err;
     old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
     spin_lock_bh(old_bucket_lock);
-    while (!(err = rhashtable_rehash_one(ht, old_hash)))
+    while (!rhashtable_rehash_one(ht, old_hash))
         ;
-
-    if (err == -ENOENT) {
-        old_tbl->rehash++;
-        err = 0;
-    }
+    old_tbl->rehash++;
     spin_unlock_bh(old_bucket_lock);
-
-    return err;
 }
 static int rhashtable_rehash_attach(struct rhashtable *ht,
@@ -363,17 +246,13 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
     struct bucket_table *new_tbl;
     struct rhashtable_walker *walker;
     unsigned int old_hash;
-    int err;
     new_tbl = rht_dereference(old_tbl->future_tbl, ht);
     if (!new_tbl)
         return 0;
-    for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
-        err = rhashtable_rehash_chain(ht, old_hash);
-        if (err)
-            return err;
-    }
+    for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+        rhashtable_rehash_chain(ht, old_hash);
     /* Publish the new table pointer. */
     rcu_assign_pointer(ht->tbl, new_tbl);
@@ -392,16 +271,31 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
     return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
-static int rhashtable_rehash_alloc(struct rhashtable *ht,
-                                   struct bucket_table *old_tbl,
-                                   unsigned int size)
+/**
+ * rhashtable_expand - Expand hash table while allowing concurrent lookups
+ * @ht: the hash table to expand
+ *
+ * A secondary bucket array is allocated and the hash entries are migrated.
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
+ */
+static int rhashtable_expand(struct rhashtable *ht)
 {
-    struct bucket_table *new_tbl;
+    struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
     int err;
     ASSERT_RHT_MUTEX(ht);
-    new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+    old_tbl = rhashtable_last_table(ht, old_tbl);
+
+    new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
     if (new_tbl == NULL)
         return -ENOMEM;
@@ -430,9 +324,12 @@ static int rhashtable_rehash_alloc(struct rhashtable *ht,
  */
 static int rhashtable_shrink(struct rhashtable *ht)
 {
-    struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+    struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
     unsigned int nelems = atomic_read(&ht->nelems);
     unsigned int size = 0;
+    int err;
+
+    ASSERT_RHT_MUTEX(ht);
     if (nelems)
         size = roundup_pow_of_two(nelems * 3 / 2);
@@ -445,7 +342,15 @@ static int rhashtable_shrink(struct rhashtable *ht)
     if (rht_dereference(old_tbl->future_tbl, ht))
         return -EEXIST;
-    return rhashtable_rehash_alloc(ht, old_tbl, size);
+    new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+    if (new_tbl == NULL)
+        return -ENOMEM;
+
+    err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+    if (err)
+        bucket_table_free(new_tbl);
+
+    return err;
 }
 static void rht_deferred_worker(struct work_struct *work)
@@ -461,13 +366,10 @@ static void rht_deferred_worker(struct work_struct *work)
     tbl = rhashtable_last_table(ht, tbl);
     if (rht_grow_above_75(ht, tbl))
-        err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
+        rhashtable_expand(ht);
     else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
-        err = rhashtable_shrink(ht);
-    else if (tbl->nest)
-        err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
-
-    if (!err)
-        err = rhashtable_rehash_table(ht);
+        rhashtable_shrink(ht);
+
+    err = rhashtable_rehash_table(ht);
     mutex_unlock(&ht->mutex);
@@ -537,8 +439,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
     int elasticity;
     elasticity = ht->elasticity;
-    pprev = rht_bucket_var(tbl, hash);
-    rht_for_each_continue(head, *pprev, tbl, hash) {
+    pprev = &tbl->buckets[hash];
+    rht_for_each(head, tbl, hash) {
         struct rhlist_head *list;
         struct rhlist_head *plist;
@@ -575,7 +477,6 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
                                                   struct rhash_head *obj,
                                                   void *data)
 {
-    struct rhash_head __rcu **pprev;
     struct bucket_table *new_tbl;
     struct rhash_head *head;
@@ -598,11 +499,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
     if (unlikely(rht_grow_above_100(ht, tbl)))
         return ERR_PTR(-EAGAIN);
-    pprev = rht_bucket_insert(ht, tbl, hash);
-    if (!pprev)
-        return ERR_PTR(-ENOMEM);
-
-    head = rht_dereference_bucket(*pprev, tbl, hash);
+    head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
     RCU_INIT_POINTER(obj->next, head);
     if (ht->rhlist) {
@@ -612,7 +509,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
         RCU_INIT_POINTER(list->next, NULL);
     }
-    rcu_assign_pointer(*pprev, obj);
+    rcu_assign_pointer(tbl->buckets[hash], obj);
     atomic_inc(&ht->nelems);
     if (rht_grow_above_75(ht, tbl))
@@ -1078,7 +975,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                  void (*free_fn)(void *ptr, void *arg),
                                  void *arg)
 {
-    struct bucket_table *tbl;
+    const struct bucket_table *tbl;
     unsigned int i;
     cancel_work_sync(&ht->run_work);
@@ -1089,7 +986,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
     for (i = 0; i < tbl->size; i++) {
         struct rhash_head *pos, *next;
-        for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
+        for (pos = rht_dereference(tbl->buckets[i], ht),
              next = !rht_is_a_nulls(pos) ?
                     rht_dereference(pos->next, ht) : NULL;
              !rht_is_a_nulls(pos);
@@ -1110,70 +1007,3 @@ void rhashtable_destroy(struct rhashtable *ht)
     return rhashtable_free_and_destroy(ht, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
-
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                            unsigned int hash)
-{
-    const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-    static struct rhash_head __rcu *rhnull =
-        (struct rhash_head __rcu *)NULLS_MARKER(0);
-    unsigned int index = hash & ((1 << tbl->nest) - 1);
-    unsigned int size = tbl->size >> tbl->nest;
-    unsigned int subhash = hash;
-    union nested_table *ntbl;
-
-    ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
-    ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
-    subhash >>= tbl->nest;
-    while (ntbl && size > (1 << shift)) {
-        index = subhash & ((1 << shift) - 1);
-        ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
-        size >>= shift;
-        subhash >>= shift;
-    }
-
-    if (!ntbl)
-        return &rhnull;
-
-    return &ntbl[subhash].bucket;
-}
-EXPORT_SYMBOL_GPL(rht_bucket_nested);
-
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                   struct bucket_table *tbl,
-                                                   unsigned int hash)
-{
-    const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-    unsigned int index = hash & ((1 << tbl->nest) - 1);
-    unsigned int size = tbl->size >> tbl->nest;
-    union nested_table *ntbl;
-    unsigned int shifted;
-    unsigned int nhash;
-
-    ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
-    hash >>= tbl->nest;
-    nhash = index;
-    shifted = tbl->nest;
-    ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                              size <= (1 << shift) ? shifted : 0, nhash);
-
-    while (ntbl && size > (1 << shift)) {
-        index = hash & ((1 << shift) - 1);
-        size >>= shift;
-        hash >>= shift;
-        nhash |= index << shifted;
-        shifted += shift;
-        ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                  size <= (1 << shift) ? shifted : 0,
-                                  nhash);
-    }
-
-    if (!ntbl)
-        return NULL;
-
-    return &ntbl[hash].bucket;
-}
-EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
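The deleted rht_bucket_nested() helpers treat an oversized bucket array as a small radix tree built from page-sized chunks. As a rough worked example, assuming 4 KiB pages and 8-byte pointers (neither assumption is spelled out in the patch): shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, so each nested page holds 512 slots, and nested_bucket_table_alloc() picks tbl->nest = (ilog2(nbuckets) - 1) % shift + 1, e.g. nest = 2 for 2^20 buckets (a 4-entry top level over two 512-way levels). The standalone userspace sketch below only replays that index decomposition; it is illustrative, not kernel code.

#include <stdio.h>

int main(void)
{
    const unsigned int shift = 12 - 3;          /* assumed: 4 KiB pages, 8-byte pointers */
    unsigned int nbuckets = 1u << 20;           /* example table size */
    unsigned int nest = (20 - 1) % shift + 1;   /* as in the removed nested_bucket_table_alloc() */
    unsigned int hash = 0xfedcb;                /* example bucket index, < nbuckets */
    unsigned int index = hash & ((1u << nest) - 1);
    unsigned int size = nbuckets >> nest;
    unsigned int subhash = hash >> nest;

    printf("nest=%u first-level slot=%u\n", nest, index);
    while (size > (1u << shift)) {              /* descend intermediate levels */
        printf("  intermediate slot=%u\n", subhash & ((1u << shift) - 1));
        size >>= shift;
        subhash >>= shift;
    }
    printf("  leaf slot=%u\n", subhash);        /* rht_bucket_nested() returns &ntbl[subhash].bucket */
    return 0;
}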
@@ -110,10 +110,6 @@ int tipc_net_start(struct net *net, u32 addr)
     char addr_string[16];
     tn->own_addr = addr;
-
-    /* Ensure that the new address is visible before we reinit. */
-    smp_mb();
-
     tipc_named_reinit(net);
     tipc_sk_reinit(net);
...
@@ -384,6 +384,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
     INIT_LIST_HEAD(&tsk->publications);
     msg = &tsk->phdr;
     tn = net_generic(sock_net(sk), tipc_net_id);
+    tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+                  NAMED_H_SIZE, 0);
     /* Finish initializing socket data structures */
     sock->ops = ops;
@@ -393,13 +395,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
         pr_warn("Socket create failed; port number exhausted\n");
         return -EINVAL;
     }
-
-    /* Ensure tsk is visible before we read own_addr. */
-    smp_mb();
-
-    tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-                  NAMED_H_SIZE, 0);
-
     msg_set_origport(msg, tsk->portid);
     setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
     sk->sk_shutdown = 0;
@@ -2274,27 +2269,24 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 void tipc_sk_reinit(struct net *net)
 {
     struct tipc_net *tn = net_generic(net, tipc_net_id);
-    struct rhashtable_iter iter;
+    const struct bucket_table *tbl;
+    struct rhash_head *pos;
     struct tipc_sock *tsk;
     struct tipc_msg *msg;
+    int i;
-
-    rhashtable_walk_enter(&tn->sk_rht, &iter);
-
-    do {
-        tsk = ERR_PTR(rhashtable_walk_start(&iter));
-        if (tsk)
-            continue;
-
-        while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
+    rcu_read_lock();
+    tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+    for (i = 0; i < tbl->size; i++) {
+        rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
             spin_lock_bh(&tsk->sk.sk_lock.slock);
             msg = &tsk->phdr;
             msg_set_prevnode(msg, tn->own_addr);
             msg_set_orignode(msg, tn->own_addr);
             spin_unlock_bh(&tsk->sk.sk_lock.slock);
         }
-
-        rhashtable_walk_stop(&iter);
-    } while (tsk == ERR_PTR(-EAGAIN));
+    }
+    rcu_read_unlock();
 }
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
...