Commit b9ecfdaa authored by Herbert Xu, committed by David S. Miller

rhashtable: Allow GFP_ATOMIC bucket table allocation

This patch adds the ability to allocate the bucket table with GFP_ATOMIC
instead of GFP_KERNEL. This is needed when we perform an immediate rehash
during insertion.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b824478b
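
Before the hunks, a condensed sketch of the allocation policy the patch introduces may help. This is a hypothetical standalone helper, not code from the patch (the name alloc_table is illustrative): vmalloc()/vzalloc() may sleep, so they are only legal for GFP_KERNEL callers; any other mask, such as GFP_ATOMIC, must stay on the kzalloc() path and simply fail if the slab allocator cannot satisfy the request.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helper mirroring the bucket_table_alloc() policy below. */
static void *alloc_table(size_t size, gfp_t gfp)
{
	void *p = NULL;

	/*
	 * Small requests, and all non-GFP_KERNEL (e.g. GFP_ATOMIC)
	 * requests, use kzalloc().  __GFP_NOWARN | __GFP_NORETRY keep a
	 * large physically contiguous attempt cheap and quiet when it
	 * fails.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		p = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);

	/* vzalloc() may sleep, so it is reserved for GFP_KERNEL callers. */
	if (!p && gfp == GFP_KERNEL)
		p = vzalloc(size);

	return p;
}

Note that an atomic caller therefore gets no vmalloc fallback at all: if the contiguous allocation fails, the allocation fails outright and can be retried later from process context (e.g. the deferred rehash worker) with GFP_KERNEL.
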
@@ -58,7 +58,8 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #endif
 
 
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
+static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
+			      gfp_t gfp)
 {
 	unsigned int i, size;
 #if defined(CONFIG_PROVE_LOCKING)
@@ -75,12 +76,13 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
 
 	if (sizeof(spinlock_t) != 0) {
 #ifdef CONFIG_NUMA
-		if (size * sizeof(spinlock_t) > PAGE_SIZE)
+		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
+		    gfp == GFP_KERNEL)
 			tbl->locks = vmalloc(size * sizeof(spinlock_t));
 		else
 #endif
 		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-					   GFP_KERNEL);
+					   gfp);
 		if (!tbl->locks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
@@ -105,23 +107,25 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 }
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-					       size_t nbuckets)
+					       size_t nbuckets,
+					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
-	if (tbl == NULL)
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
+	    gfp != GFP_KERNEL)
+		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
+	if (tbl == NULL && gfp == GFP_KERNEL)
 		tbl = vzalloc(size);
 	if (tbl == NULL)
 		return NULL;
 
 	tbl->size = nbuckets;
 
-	if (alloc_bucket_locks(ht, tbl) < 0) {
+	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -288,7 +292,7 @@ static int rhashtable_expand(struct rhashtable *ht)
 
 	old_tbl = rhashtable_last_table(ht, old_tbl);
 
-	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -332,7 +336,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
 	if (rht_dereference(old_tbl->future_tbl, ht))
 		return -EEXIST;
 
-	new_tbl = bucket_table_alloc(ht, size);
+	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -689,7 +693,7 @@ int rhashtable_init(struct rhashtable *ht,
 		}
 	}
 
-	tbl = bucket_table_alloc(ht, size);
+	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
 