Commit 97defe1e authored by Thomas Graf, committed by David S. Miller

rhashtable: Per bucket locks & deferred expansion/shrinking

Introduces an array of spinlocks to protect bucket mutations. The number
of spinlocks per CPU is configurable; the lock guarding a bucket is
selected based on the bucket's hash. This allows parallel insertions and
removals of entries which do not share a lock.
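
As a sketch of how a bucket's lock is chosen (using the locks[] and
locks_mask fields this patch adds to struct bucket_table; the helper name
is illustrative):

    /* Illustrative: pick the spinlock guarding a given bucket. Several
     * buckets may map to the same lock, but buckets mapping to different
     * locks can be mutated in parallel. */
    static inline spinlock_t *bucket_lock(const struct bucket_table *tbl,
                                          u32 hash)
    {
            return &tbl->locks[hash & tbl->locks_mask];
    }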

The patch also defers expansion and shrinking to a worker queue, which
allows insertion and removal from atomic context. Insertions and
deletions may occur in parallel with a resize and are only held up briefly
while the particular bucket is linked or unzipped.
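
A rough sketch of the deferred path, assuming an insertion has already
taken its bucket lock (grow_decision, nelems, and run_work are fields
added or changed by this patch; the exact call sites live in
lib/rhashtable.c):

    /* Never resize inline; kick the worker instead. The worker runs in
     * process context and may call rhashtable_expand() there, so the
     * insert itself remains safe from atomic context. */
    atomic_inc(&ht->nelems);
    if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
            schedule_delayed_work(&ht->run_work, 0);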

Mutations of the bucket table pointer are protected by a new mutex; read
access is RCU protected.
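
In other words, roughly (new_tbl stands in for a freshly allocated table):

    /* Writers swap the table pointer only under ht->mutex. */
    mutex_lock(&ht->mutex);
    rcu_assign_pointer(ht->tbl, new_tbl);
    mutex_unlock(&ht->mutex);

    /* Readers follow the pointer inside an RCU read-side section. */
    rcu_read_lock();
    tbl = rcu_dereference(ht->tbl);
    /* ... walk the buckets ... */
    rcu_read_unlock();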

In the event of an expansion or shrinking, the newly allocated bucket
table is exposed as a so-called future table as soon as the resize process
starts.  Lookups, deletions, and insertions will briefly use both tables.
The future table becomes the main table after an RCU grace period has
elapsed and the initial linking of the old to the new table has been
performed. Optimization of the chains to make use of the new number of
buckets follows only once the new table is in use.
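
Sketched as pseudocode (not the exact code in lib/rhashtable.c):

    /* 1. Publish the future table; from here on, lookups, insertions
     *    and deletions consult both tables. */
    rcu_assign_pointer(ht->future_tbl, new_tbl);
    /* 2. Link the old bucket chains into the new table. */
    /* 3. Wait out readers that only know the old table. */
    synchronize_rcu();
    /* 4. Promote the future table to main table. */
    rcu_assign_pointer(ht->tbl, new_tbl);
    /* 5. Unzip the chains under the bucket locks to exploit the new
     *    bucket count. */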

A side effect of this is that, during the RCU grace period, a bucket
traversal using any rht_for_each() variant on the main table will not see
insertions performed during that grace period, since they land in the
future table. Lookups will still see them, as a lookup searches both
tables if needed.
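
A lookup during a resize therefore looks roughly like this
(search_one_table() is a hypothetical helper standing in for the bucket
walk):

    rcu_read_lock();
    old_tbl = rht_dereference_rcu(ht->tbl, ht);
    tbl = rht_dereference_rcu(ht->future_tbl, ht); /* == old_tbl when idle */
    obj = search_one_table(old_tbl, key);
    if (!obj && tbl != old_tbl)
            obj = search_one_table(tbl, key);
    rcu_read_unlock();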

Having multiple insertions and removals occur in parallel requires nelems
to become an atomic counter.
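That is, the plain increments become atomic operations, roughly:

    atomic_inc(&ht->nelems);        /* insertion path */
    atomic_dec(&ht->nelems);        /* removal path */
    n = atomic_read(&ht->nelems);   /* e.g. when evaluating grow_decision */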
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 113948d8
include/linux/rhashtable.h:

@@ -19,6 +19,7 @@
 #define _LINUX_RHASHTABLE_H
 
 #include <linux/rculist.h>
+#include <linux/workqueue.h>
 
 struct rhash_head {
 	struct rhash_head __rcu *next;
@@ -26,8 +27,17 @@ struct rhash_head {
 
 #define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
 
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @buckets: size * hash buckets
+ */
 struct bucket_table {
 	size_t size;
+	unsigned int locks_mask;
+	spinlock_t *locks;
 	struct rhash_head __rcu *buckets[];
 };
 
@@ -45,11 +55,11 @@ struct rhashtable;
  * @hash_rnd: Seed to use while hashing
 * @max_shift: Maximum number of shifts while expanding
 * @min_shift: Minimum number of shifts while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 * @grow_decision: If defined, may return true if table should expand
 * @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
 */
 struct rhashtable_params {
 	size_t nelem_hint;
@@ -59,37 +69,42 @@ struct rhashtable_params {
 	u32 hash_rnd;
 	size_t max_shift;
 	size_t min_shift;
+	size_t locks_mul;
 	rht_hashfn_t hashfn;
 	rht_obj_hashfn_t obj_hashfn;
 	bool (*grow_decision)(const struct rhashtable *ht,
 			      size_t new_size);
 	bool (*shrink_decision)(const struct rhashtable *ht,
 				size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
-	int (*mutex_is_held)(void *parent);
-	void *parent;
-#endif
 };
 
 /**
  * struct rhashtable - Hash table handle
  * @tbl: Bucket table
+ * @future_tbl: Table under construction during expansion/shrinking
  * @nelems: Number of elements in table
  * @shift: Current size (1 << shift)
  * @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
 	struct bucket_table __rcu *tbl;
-	size_t nelems;
+	struct bucket_table __rcu *future_tbl;
+	atomic_t nelems;
 	size_t shift;
 	struct rhashtable_params p;
+	struct delayed_work run_work;
+	struct mutex mutex;
+	bool being_destroyed;
 };
 
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
 #else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
 {
 	return 1;
 }
@@ -112,11 +127,11 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key,
+void *rhashtable_lookup(struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 				bool (*compare)(void *, void *), void *arg);
 
-void rhashtable_destroy(const struct rhashtable *ht);
+void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
...
This diff is collapsed.
net/netfilter/nft_set_hash.c:

@@ -33,7 +33,7 @@ static bool nft_hash_lookup(const struct nft_set *set,
 			    const struct nft_data *key,
 			    struct nft_data *data)
 {
-	const struct rhashtable *priv = nft_set_priv(set);
+	struct rhashtable *priv = nft_set_priv(set);
 	const struct nft_hash_elem *he;
 
 	he = rhashtable_lookup(priv, key);
@@ -113,7 +113,7 @@ static bool nft_hash_compare(void *ptr, void *arg)
 
 static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 {
-	const struct rhashtable *priv = nft_set_priv(set);
+	struct rhashtable *priv = nft_set_priv(set);
 	struct nft_compare_arg arg = {
 		.set = set,
 		.elem = elem,
@@ -129,7 +129,7 @@ static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
 			  struct nft_set_iter *iter)
 {
-	const struct rhashtable *priv = nft_set_priv(set);
+	struct rhashtable *priv = nft_set_priv(set);
 	const struct bucket_table *tbl;
 	const struct nft_hash_elem *he;
 	struct nft_set_elem elem;
@@ -162,13 +162,6 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 	return sizeof(struct rhashtable);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nfnl_lock_is_held(void *parent)
-{
-	return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
-}
-#endif
-
 static int nft_hash_init(const struct nft_set *set,
 			 const struct nft_set_desc *desc,
 			 const struct nlattr * const tb[])
@@ -182,9 +175,6 @@ static int nft_hash_init(const struct nft_set *set,
 		.hashfn = jhash,
 		.grow_decision = rht_grow_above_75,
 		.shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
-		.mutex_is_held = lockdep_nfnl_lock_is_held,
-#endif
 	};
 
 	return rhashtable_init(priv, &params);
@@ -192,16 +182,23 @@ static int nft_hash_init(const struct nft_set *set,
 static void nft_hash_destroy(const struct nft_set *set)
 {
-	const struct rhashtable *priv = nft_set_priv(set);
-	const struct bucket_table *tbl = priv->tbl;
+	struct rhashtable *priv = nft_set_priv(set);
+	const struct bucket_table *tbl;
 	struct nft_hash_elem *he;
 	struct rhash_head *pos, *next;
 	unsigned int i;
 
+	/* Stop an eventual async resizing */
+	priv->being_destroyed = true;
+	mutex_lock(&priv->mutex);
+
+	tbl = rht_dereference(priv->tbl, priv);
 	for (i = 0; i < tbl->size; i++) {
 		rht_for_each_entry_safe(he, pos, next, tbl, i, node)
 			nft_hash_elem_destroy(set, he);
 	}
+	mutex_unlock(&priv->mutex);
+
 	rhashtable_destroy(priv);
 }
...
net/netlink/af_netlink.c:

@@ -114,15 +114,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 DEFINE_MUTEX(nl_sk_hash_lock);
 EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
 
-#ifdef CONFIG_PROVE_LOCKING
-static int lockdep_nl_sk_hash_is_held(void *parent)
-{
-	if (debug_locks)
-		return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
-	return 1;
-}
-#endif
-
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
 static DEFINE_SPINLOCK(netlink_tap_lock);
@@ -1063,7 +1054,8 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 		goto err;
 
 	err = -ENOMEM;
-	if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
+	if (BITS_PER_LONG > 32 &&
+	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
 		goto err;
 
 	nlk_sk(sk)->portid = portid;
@@ -3122,9 +3114,6 @@ static int __init netlink_proto_init(void)
 		.max_shift = 16, /* 64K */
 		.grow_decision = rht_grow_above_75,
 		.shrink_decision = rht_shrink_below_30,
-#ifdef CONFIG_PROVE_LOCKING
-		.mutex_is_held = lockdep_nl_sk_hash_is_held,
-#endif
 	};
 
 	if (err != 0)
...