Commit e1c151a4 authored by David S. Miller

Merge branch 'rhashtable-allocation-failure-during-insertion'

Herbert Xu says:

====================
rhashtable: Handle table allocation failure during insertion

v2 -

Added Ack to patch 2.
Fixed the RCU annotation in the code path executed by the rehasher by
using rht_dereference_bucket.

v1 -

This series tackles the problem of table allocation failures during
insertion.  The issue is that we cannot vmalloc during insertion.
This series deals with this by introducing nested tables.

The first two patches remove manual hash table walks, which cannot
work on a nested table.

The final patch introduces nested tables.

I've tested this with test_rhashtable and it appears to work.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4581be42 da20420f
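The cover letter is terse about what a nested table actually is. As a purely
conceptual illustration (standalone C with made-up names such as NEST_SHIFT,
struct bucket and nested_bucket(); this is not the kernel implementation),
the idea is a two-level bucket array: a small first level of pointers to
fixed-size leaf arrays, each small enough to be allocated atomically, so the
bucket array can grow during insertion without vmalloc:

/*
 * Conceptual sketch only -- hypothetical names, not the kernel code.
 * The first level is an array of pointers; each entry points to a
 * fixed-size leaf array of buckets that is small enough to come from
 * an ordinary (atomic) allocation, so no vmalloc is ever needed.
 */
#include <stdlib.h>

#define NEST_SHIFT 9                         /* 1 << 9 buckets per leaf */
#define NEST_SIZE  (1u << NEST_SHIFT)

struct bucket { void *head; };

/* Return the bucket for @hash, allocating the leaf lazily.  A NULL
 * return means even the small allocation failed, and insertion can
 * then report -ENOMEM. */
static struct bucket *nested_bucket(struct bucket **level1, unsigned int hash)
{
        unsigned int idx1 = hash >> NEST_SHIFT;      /* first-level index */
        unsigned int idx2 = hash & (NEST_SIZE - 1);  /* index inside the leaf */

        if (!level1[idx1]) {
                level1[idx1] = calloc(NEST_SIZE, sizeof(struct bucket));
                if (!level1[idx1])
                        return NULL;
        }
        return &level1[idx1][idx2];
}

In the real series the corresponding role is played by the new nest field and
the rht_bucket*() helpers visible in the include/linux/rhashtable.h diff below.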
fs/gfs2/glock.c
@@ -1420,26 +1420,32 @@ static struct shrinker glock_shrinker = {
  * @sdp: the filesystem
  * @bucket: the bucket
  *
+ * Note that the function can be called multiple times on the same
+ * object.  So the user must ensure that the function can cope with
+ * that.
  */
 
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
         struct gfs2_glock *gl;
-        struct rhash_head *pos;
-        const struct bucket_table *tbl;
-        int i;
+        struct rhashtable_iter iter;
+
+        rhashtable_walk_enter(&gl_hash_table, &iter);
 
-        rcu_read_lock();
-        tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
-        for (i = 0; i < tbl->size; i++) {
-                rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
+        do {
+                gl = ERR_PTR(rhashtable_walk_start(&iter));
+                if (gl)
+                        continue;
+
+                while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                         if ((gl->gl_name.ln_sbd == sdp) &&
                             lockref_get_not_dead(&gl->gl_lockref))
                                 examiner(gl);
-                }
-        }
-        rcu_read_unlock();
-        cond_resched();
+
+                rhashtable_walk_stop(&iter);
+        } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+
+        rhashtable_walk_exit(&iter);
 }
 
 /**
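Both the gfs2 conversion above and the tipc_sk_reinit() conversion further
down replace an open-coded walk over tbl->buckets[], which cannot work once
buckets may live in a nested table, with the rhashtable walk iterator. A
sketch of the general shape of that pattern as used in these patches; struct
my_obj and visit() are placeholders, not part of this series:

/*
 * Sketch of the iterator pattern adopted above; "struct my_obj" and
 * visit() are placeholders for the real element type and callback.
 */
#include <linux/err.h>
#include <linux/rhashtable.h>
#include <linux/sched.h>

struct my_obj;                          /* element type stored in the table */
static void visit(struct my_obj *obj);  /* per-object callback */

static void walk_all_objects(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        rhashtable_walk_enter(ht, &iter);

        do {
                /* rhashtable_walk_start() enters an RCU read-side section;
                 * a non-zero return (-EAGAIN after a resize) makes the loop
                 * retry the whole walk. */
                obj = ERR_PTR(rhashtable_walk_start(&iter));
                if (obj)
                        continue;

                /* rhashtable_walk_next() can also hand back ERR_PTR(-EAGAIN). */
                while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
                        visit(obj);

                rhashtable_walk_stop(&iter);
        } while (cond_resched(), obj == ERR_PTR(-EAGAIN));

        rhashtable_walk_exit(&iter);
}

Because a resize can force the walk to restart with -EAGAIN, callers may see
the same object more than once; that is exactly why the gfs2 patch adds the
comment that glock_hash_walk() callbacks must cope with repeated calls.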
include/linux/rhashtable.h
@@ -61,6 +61,7 @@ struct rhlist_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
+ * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
@@ -68,10 +69,12 @@ struct rhlist_head {
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
+ * @ntbl: Nested table used when out of memory.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
         unsigned int size;
+        unsigned int nest;
         unsigned int rehash;
         u32 hash_rnd;
         unsigned int locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
 
         struct bucket_table __rcu *future_tbl;
 
-        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /**
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                  void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+                                            unsigned int hash);
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                   struct bucket_table *tbl,
+                                                   unsigned int hash);
+
 #define rht_dereference(p, ht) \
         rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
 #define rht_entry(tpos, pos, member) \
         ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
+static inline struct rhash_head __rcu *const *rht_bucket(
+        const struct bucket_table *tbl, unsigned int hash)
+{
+        return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+                                     &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_var(
+        struct bucket_table *tbl, unsigned int hash)
+{
+        return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+                                     &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_insert(
+        struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+        return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
+                                     &tbl->buckets[hash];
+}
+
 /**
  * rht_for_each_continue - continue iterating over hash chain
  * @pos: the &struct rhash_head to use as a loop cursor.
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * @hash: the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-        rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+        rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
  * rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)               \
-        rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],   \
+        rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
                                     tbl, hash, member)
 
 /**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)        \
-        for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
-             next = !rht_is_a_nulls(pos) ?                                  \
-                       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
-             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
-             pos = next,                                                    \
-             next = !rht_is_a_nulls(pos) ?                                  \
-                       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)          \
+        for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+             next = !rht_is_a_nulls(pos) ?                                    \
+                       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
+             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);          \
+             pos = next,                                                      \
+             next = !rht_is_a_nulls(pos) ?                                    \
+                       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
 
 /**
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash) \
-        rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+        rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
  * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)           \
-        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)              \
+        rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
                                         tbl, hash, member)
 
 /**
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                 .ht = ht,
                 .key = key,
         };
-        const struct bucket_table *tbl;
+        struct bucket_table *tbl;
         struct rhash_head *he;
         unsigned int hash;
 
@@ -697,8 +727,12 @@ static inline void *__rhashtable_insert_fast(
         }
 
         elasticity = ht->elasticity;
-        pprev = &tbl->buckets[hash];
-        rht_for_each(head, tbl, hash) {
+        pprev = rht_bucket_insert(ht, tbl, hash);
+        data = ERR_PTR(-ENOMEM);
+        if (!pprev)
+                goto out;
+
+        rht_for_each_continue(head, *pprev, tbl, hash) {
                 struct rhlist_head *plist;
                 struct rhlist_head *list;
 
@@ -736,7 +770,7 @@ static inline void *__rhashtable_insert_fast(
         if (unlikely(rht_grow_above_100(ht, tbl)))
                 goto slow_path;
 
-        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+        head = rht_dereference_bucket(*pprev, tbl, hash);
 
         RCU_INIT_POINTER(obj->next, head);
         if (rhlist) {
@@ -746,7 +780,7 @@ static inline void *__rhashtable_insert_fast(
                 RCU_INIT_POINTER(list->next, NULL);
         }
 
-        rcu_assign_pointer(tbl->buckets[hash], obj);
+        rcu_assign_pointer(*pprev, obj);
 
         atomic_inc(&ht->nelems);
         if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
 
         spin_lock_bh(lock);
 
-        pprev = &tbl->buckets[hash];
-        rht_for_each(he, tbl, hash) {
+        pprev = rht_bucket_var(tbl, hash);
+        rht_for_each_continue(he, *pprev, tbl, hash) {
                 struct rhlist_head *list;
 
                 list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
 
         spin_lock_bh(lock);
 
-        pprev = &tbl->buckets[hash];
-        rht_for_each(he, tbl, hash) {
+        pprev = rht_bucket_var(tbl, hash);
+        rht_for_each_continue(he, *pprev, tbl, hash) {
                 if (he != obj_old) {
                         pprev = &he->next;
                         continue;
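The header changes above funnel every bucket access through rht_bucket(),
rht_bucket_var() or rht_bucket_insert(), and __rhashtable_insert_fast() now
turns a failed rht_bucket_insert() into ERR_PTR(-ENOMEM). The intent of the
series is that insertion no longer depends on being able to allocate one
large flat table; callers still check the return value as before. A minimal
caller-side sketch (my_obj, my_params, my_table and add_obj are illustrative
names, and the table is assumed to have been set up with rhashtable_init()):

/* Sketch only; none of these names are part of this series. */
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct my_obj {
        u32 key;
        struct rhash_head node;
};

static const struct rhashtable_params my_params = {
        .key_len     = sizeof(u32),
        .key_offset  = offsetof(struct my_obj, key),
        .head_offset = offsetof(struct my_obj, node),
};

static int add_obj(struct rhashtable *my_table, u32 key)
{
        struct my_obj *obj;
        int err;

        obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
        if (!obj)
                return -ENOMEM;
        obj->key = key;

        /* With nested tables the insert path no longer needs a vmalloc'ed
         * table, but it can still fail (e.g. -ENOMEM if even the small
         * per-level allocation fails), so the result must be checked. */
        err = rhashtable_insert_fast(my_table, &obj->node, my_params);
        if (err)
                kfree(obj);
        return err;
}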
[A further diff in this merge is collapsed and not shown.]
net/tipc/net.c
@@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr)
         char addr_string[16];
 
         tn->own_addr = addr;
+
+        /* Ensure that the new address is visible before we reinit. */
+        smp_mb();
+
         tipc_named_reinit(net);
         tipc_sk_reinit(net);
 
net/tipc/socket.c
@@ -430,8 +430,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
         INIT_LIST_HEAD(&tsk->cong_links);
         msg = &tsk->phdr;
         tn = net_generic(sock_net(sk), tipc_net_id);
-        tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-                      NAMED_H_SIZE, 0);
 
         /* Finish initializing socket data structures */
         sock->ops = ops;
@@ -441,6 +439,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
                 pr_warn("Socket create failed; port number exhausted\n");
                 return -EINVAL;
         }
+
+        /* Ensure tsk is visible before we read own_addr. */
+        smp_mb();
+
+        tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+                      NAMED_H_SIZE, 0);
+
         msg_set_origport(msg, tsk->portid);
         setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
         sk->sk_shutdown = 0;
@@ -2234,24 +2239,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 void tipc_sk_reinit(struct net *net)
 {
         struct tipc_net *tn = net_generic(net, tipc_net_id);
-        const struct bucket_table *tbl;
-        struct rhash_head *pos;
+        struct rhashtable_iter iter;
         struct tipc_sock *tsk;
         struct tipc_msg *msg;
-        int i;
 
-        rcu_read_lock();
-        tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-        for (i = 0; i < tbl->size; i++) {
-                rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+        rhashtable_walk_enter(&tn->sk_rht, &iter);
+
+        do {
+                tsk = ERR_PTR(rhashtable_walk_start(&iter));
+                if (tsk)
+                        continue;
+
+                while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
                         spin_lock_bh(&tsk->sk.sk_lock.slock);
                         msg = &tsk->phdr;
                         msg_set_prevnode(msg, tn->own_addr);
                         msg_set_orignode(msg, tn->own_addr);
                         spin_unlock_bh(&tsk->sk.sk_lock.slock);
                 }
-        }
-        rcu_read_unlock();
+
+                rhashtable_walk_stop(&iter);
+        } while (tsk == ERR_PTR(-EAGAIN));
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)