Commit 074b8517 authored by Dimitri Sivanich, committed by Al Viro

vfs: fix panic in __d_lookup() with high dentry hashtable counts

When the number of dentry cache hash table entries gets too high
(2147483648 entries), as happens by default on a 16TB system, use of a
signed integer in the dcache_init() initialization loop prevents the
dentry_hashtable from getting initialized, causing a panic in
__d_lookup().  Fix this in dcache_init() and similar areas.
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 1d6f2097
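
For illustration only (not part of the commit): a minimal userspace sketch, assuming the 2147483648-entry (shift of 31) case described in the message, of why a signed loop counter never initializes the table while an unsigned one does.

/*
 * Minimal userspace sketch (not kernel code) of the failure mode: with a
 * hash shift of 31, "1 << shift" on a signed int overflows -- formally
 * undefined behaviour, in practice a negative value on common compilers --
 * so the initialization loop never runs and the table is left uninitialized.
 */
#include <stdio.h>

int main(void)
{
	int d_hash_shift = 31;		/* shift produced for a 2^31-entry table */
	int loop;
	unsigned int uloop;
	int buggy_entered = 0, fixed_entered = 0;

	for (loop = 0; loop < (1 << d_hash_shift); loop++) {
		buggy_entered = 1;	/* never reached: (1 << 31) is negative */
		break;
	}

	for (uloop = 0; uloop < (1U << d_hash_shift); uloop++) {
		fixed_entered = 1;	/* reached: 1U << 31 == 2147483648 */
		break;
	}

	printf("signed loop entered:   %d\n", buggy_entered);
	printf("unsigned loop entered: %d\n", fixed_entered);
	return 0;
}
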
@@ -2968,7 +2968,7 @@ __setup("dhash_entries=", set_dhash_entries);
 
 static void __init dcache_init_early(void)
 {
-	int loop;
+	unsigned int loop;
 
 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
@@ -2986,13 +2986,13 @@ static void __init dcache_init_early(void)
 					&d_hash_mask,
 					0);
 
-	for (loop = 0; loop < (1 << d_hash_shift); loop++)
+	for (loop = 0; loop < (1U << d_hash_shift); loop++)
 		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
 }
 
 static void __init dcache_init(void)
 {
-	int loop;
+	unsigned int loop;
 
 	/*
 	 * A constructor could be added for stable state like the lists,
...
@@ -1651,7 +1651,7 @@ __setup("ihash_entries=", set_ihash_entries);
  */
 void __init inode_init_early(void)
 {
-	int loop;
+	unsigned int loop;
 
 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
@@ -1669,13 +1669,13 @@ void __init inode_init_early(void)
 					&i_hash_mask,
 					0);
 
-	for (loop = 0; loop < (1 << i_hash_shift); loop++)
+	for (loop = 0; loop < (1U << i_hash_shift); loop++)
 		INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }
 
 void __init inode_init(void)
 {
-	int loop;
+	unsigned int loop;
 
 	/* inode slab cache */
 	inode_cachep = kmem_cache_create("inode_cache",
@@ -1699,7 +1699,7 @@ void __init inode_init(void)
 					&i_hash_mask,
 					0);
 
-	for (loop = 0; loop < (1 << i_hash_shift); loop++)
+	for (loop = 0; loop < (1U << i_hash_shift); loop++)
 		INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }
...
@@ -543,12 +543,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
  */
 void __init pidhash_init(void)
 {
-	int i, pidhash_size;
+	unsigned int i, pidhash_size;
 
 	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
 					   HASH_EARLY | HASH_SMALL,
 					   &pidhash_shift, NULL, 4096);
-	pidhash_size = 1 << pidhash_shift;
+	pidhash_size = 1U << pidhash_shift;
 
 	for (i = 0; i < pidhash_size; i++)
 		INIT_HLIST_HEAD(&pid_hash[i]);
...
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
 		do_div(max, bucketsize);
 	}
+	max = min(max, 0x80000000ULL);
 
 	if (numentries > max)
 		numentries = max;
...
@@ -3240,7 +3240,8 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int i, max_share, cnt;
+	int max_share, cnt;
+	unsigned int i;
 	unsigned long jiffy = jiffies;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3283,7 +3284,7 @@ void __init tcp_init(void)
 					&tcp_hashinfo.bhash_size,
 					NULL,
 					64 * 1024);
-	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
...
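
The alloc_large_system_hash() hunk also adds a clamp: capping max at 0x80000000ULL (2^31 entries) bounds the hash shift handed back to callers at 31, so the 1U << shift expressions above still fit in an unsigned int. A minimal userspace sketch of that arithmetic, with made-up sizes rather than the kernel's actual sizing logic:

/*
 * Sketch of the effect of the new clamp, using invented numbers (not the
 * kernel's real sizing code): capping the entry count at 2^31 keeps the
 * derived hash shift at or below 31.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long max = 1ULL << 34;		/* hypothetical 1/16-of-memory limit */
	unsigned long long numentries = 1ULL << 33;	/* hypothetical requested bucket count */
	unsigned int shift;

	if (max > 0x80000000ULL)			/* the clamp added by the commit */
		max = 0x80000000ULL;
	if (numentries > max)
		numentries = max;

	/* derive log2(numentries), analogous to the kernel's ilog2() */
	for (shift = 0; (1ULL << shift) < numentries; shift++)
		;

	printf("numentries = %llu, shift = %u (fits 1U << shift)\n", numentries, shift);
	return 0;
}
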