Commit 4481374c authored by Jan Beulich, committed by Linus Torvalds

mm: replace various uses of num_physpages by totalram_pages

Sizing of memory allocations shouldn't depend on the number of physical
pages found in a system, as that generally includes (perhaps a huge amount
of) non-RAM pages.  The amount of memory that is actually usable as storage
should be used as the basis instead.

Some of the calculations (i.e.  those not intending to use high memory)
should likely even use (totalram_pages - totalhigh_pages).
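
The shape of the check being fixed is the same everywhere in the patch:
compare a request's page count against the pages actually available.  A
minimal userspace sketch of that bound, for illustration only — sysconf()
stands in for the kernel globals, glibc's _SC_PHYS_PAGES only approximates
totalram_pages, and the 1 TiB request is invented:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	long totalram_pages = sysconf(_SC_PHYS_PAGES);	/* glibc extension */
	unsigned long long request = 1ULL << 40;	/* deliberately absurd: 1 TiB */

	/* Same shape as the vmap()/__vmalloc_node() checks below. */
	if (request / page_size > (unsigned long long)totalram_pages)
		printf("rejected: %llu pages requested, only %ld RAM pages\n",
		       request / page_size, totalram_pages);
	return 0;
}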
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4738e1b9
@@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
 {
 	ssize_t ret = -EINVAL;
 
-	if ((len >> PAGE_SHIFT) > num_physpages) {
-		pr_err("microcode: too much data (max %ld pages)\n", num_physpages);
+	if ((len >> PAGE_SHIFT) > totalram_pages) {
+		pr_err("microcode: too much data (max %ld pages)\n", totalram_pages);
 		return ret;
 	}
...
@@ -114,9 +114,9 @@ static int agp_find_max(void)
 	long memory, index, result;
 
 #if PAGE_SHIFT < 20
-	memory = num_physpages >> (20 - PAGE_SHIFT);
+	memory = totalram_pages >> (20 - PAGE_SHIFT);
 #else
-	memory = num_physpages << (PAGE_SHIFT - 20);
+	memory = totalram_pages << (PAGE_SHIFT - 20);
 #endif
 	index = 1;
...
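The #if/#else pair above is just a pages-to-megabytes conversion that works
for any PAGE_SHIFT: below 20 a megabyte spans several pages, while at a
(hypothetical) page size of 1 MiB or more each page spans whole megabytes.
A worked sketch, assuming 4 KiB pages (PAGE_SHIFT = 12) and an invented
1 GiB machine:

#include <stdio.h>

int main(void)
{
	unsigned long totalram_pages = 262144;		/* 1 GiB in 4 KiB pages */
	long memory = totalram_pages >> (20 - 12);	/* 2^8 = 256 pages per MB */

	printf("%ld MB\n", memory);			/* prints "1024 MB" */
	return 0;
}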
@@ -1266,7 +1266,7 @@ ccio_ioc_init(struct ioc *ioc)
 	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
 	*/
-	iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));
+	iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));
 
 	/* limit IOVA space size to 1MB-1GB */
@@ -1305,7 +1305,7 @@ ccio_ioc_init(struct ioc *ioc)
 	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
 			__func__, ioc->ioc_regs,
-			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
 			iova_space_size>>20,
 			iov_order + PAGE_SHIFT);
...
@@ -1390,7 +1390,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	** for DMA hints - ergo only 30 bits max.
 	*/
-	iova_space_size = (u32) (num_physpages/global_ioc_cnt);
+	iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
 
 	/* limit IOVA space size to 1MB-1GB */
 	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
@@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
 			__func__,
 			ioc->ioc_hpa,
-			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
 			iova_space_size>>20,
 			iov_order + PAGE_SHIFT);
...
@@ -96,11 +96,7 @@ static struct balloon_stats balloon_stats;
 /* We increase/decrease in batches which fit in a page */
 static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
 
-/* VM /proc information for memory */
-extern unsigned long totalram_pages;
-
 #ifdef CONFIG_HIGHMEM
-extern unsigned long totalhigh_pages;
 #define inc_totalhigh_pages() (totalhigh_pages++)
 #define dec_totalhigh_pages() (totalhigh_pages--)
 #else
...
@@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
 		return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
 		/* return (void *)__get_free_page(gfp_mask); */
 	}
-	if (likely(size >> PAGE_SHIFT < num_physpages))
+	if (likely((size >> PAGE_SHIFT) < totalram_pages))
 		return __vmalloc(size, gfp_mask, PAGE_KERNEL);
 	return NULL;
 }
...
@@ -25,6 +25,7 @@ extern unsigned long max_mapnr;
 #endif
 
 extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
 extern void * high_memory;
 extern int page_cluster;
...
@@ -668,12 +668,12 @@ asmlinkage void __init start_kernel(void)
 #endif
 	thread_info_cache_init();
 	cred_init();
-	fork_init(num_physpages);
+	fork_init(totalram_pages);
 	proc_caches_init();
 	buffer_init();
 	key_init();
 	security_init();
-	vfs_caches_init(num_physpages);
+	vfs_caches_init(totalram_pages);
 	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
...
@@ -1384,7 +1384,7 @@ void __init kmem_cache_init(void)
 	 * Fragmentation resistance on low memory - only use bigger
 	 * page orders on machines with more than 32MB of memory.
 	 */
-	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
 
 	/* Bootstrap is tricky, because several objects are allocated
...
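The comparison above converts 32 MB into pages so it can be tested against
totalram_pages directly.  A quick check of the arithmetic, assuming 4 KiB
pages (PAGE_SHIFT = 12):

#include <stdio.h>

int main(void)
{
	unsigned long threshold = (32UL << 20) >> 12;	/* 32 MB in 4 KiB pages */

	printf("%lu pages\n", threshold);	/* prints "8192 pages" */
	return 0;
}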
@@ -496,7 +496,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
  */
 void __init swap_setup(void)
 {
-	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 
 #ifdef CONFIG_SWAP
 	bdi_init(swapper_space.backing_dev_info);
...
@@ -1386,7 +1386,7 @@ void *vmap(struct page **pages, unsigned int count,
 	might_sleep();
 
-	if (count > num_physpages)
+	if (count > totalram_pages)
 		return NULL;
 
 	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
@@ -1493,7 +1493,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	unsigned long real_size = size;
 
 	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > num_physpages)
+	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
 	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
...
@@ -1206,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
 
 void __init sk_init(void)
 {
-	if (num_physpages <= 4096) {
+	if (totalram_pages <= 4096) {
 		sysctl_wmem_max = 32767;
 		sysctl_rmem_max = 32767;
 		sysctl_wmem_default = 32767;
 		sysctl_rmem_default = 32767;
-	} else if (num_physpages >= 131072) {
+	} else if (totalram_pages >= 131072) {
 		sysctl_wmem_max = 131071;
 		sysctl_rmem_max = 131071;
 	}
...
@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		goal = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		goal = num_physpages >> (23 - PAGE_SHIFT);
+		goal = totalram_pages >> (23 - PAGE_SHIFT);
 
 	if (thash_entries)
 		goal = (thash_entries *
...
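The two shifts implement the buffer-cache-style heuristic the comment refers
to: totalram_pages >> (21 - PAGE_SHIFT) is total RAM in bytes divided by
2 MB, and >> (23 - PAGE_SHIFT) divides by 8 MB, so machines at or above
128 * 1024 pages (512 MB with 4 KiB pages) get the denser sizing.  A worked
sketch with invented numbers, again assuming PAGE_SHIFT = 12:

#include <stdio.h>

int main(void)
{
	unsigned long totalram_pages = 262144;	/* 1 GiB in 4 KiB pages */
	unsigned long goal;

	if (totalram_pages >= 128 * 1024)		/* >= 512 MB of RAM */
		goal = totalram_pages >> (21 - 12);	/* one unit per 2 MB */
	else
		goal = totalram_pages >> (23 - 12);	/* one unit per 8 MB */

	printf("goal = %lu\n", goal);	/* prints "goal = 512" */
	return 0;
}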
@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
 	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
 	add_timer(&dn_route_timer);
 
-	goal = num_physpages >> (26 - PAGE_SHIFT);
+	goal = totalram_pages >> (26 - PAGE_SHIFT);
 
 	for(order = 0; (1UL << order) < goal; order++)
 		/* NOTHING */;
...
@@ -3414,7 +3414,7 @@ int __init ip_rt_init(void)
 		alloc_large_system_hash("IP route cache",
 					sizeof(struct rt_hash_bucket),
 					rhash_entries,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					15 : 17,
 					0,
 					&rt_hash_log,
...
@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
 	alloc_large_system_hash("TCP established",
 				sizeof(struct inet_ehash_bucket),
 				thash_entries,
-				(num_physpages >= 128 * 1024) ?
+				(totalram_pages >= 128 * 1024) ?
 				13 : 15,
 				0,
 				&tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
 	alloc_large_system_hash("TCP bind",
 				sizeof(struct inet_bind_hashbucket),
 				tcp_hashinfo.ehash_size,
-				(num_physpages >= 128 * 1024) ?
+				(totalram_pages >= 128 * 1024) ?
 				13 : 15,
 				0,
 				&tcp_hashinfo.bhash_size,
...
@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
 	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
 	if (!nf_conntrack_htable_size) {
 		nf_conntrack_htable_size
-			= (((num_physpages << PAGE_SHIFT) / 16384)
+			= (((totalram_pages << PAGE_SHIFT) / 16384)
 			   / sizeof(struct hlist_head));
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 			nf_conntrack_htable_size = 16384;
 		if (nf_conntrack_htable_size < 32)
 			nf_conntrack_htable_size = 32;
...
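The figures in the comment can be reproduced from the formula, assuming
i386's 4 KiB pages and a 4-byte struct hlist_head (one pointer): a 32 MB
machine gets (32 MB / 16384) / 4 = 512 buckets, and at 1 GB the formula
reaches 16384, which is exactly where the cap takes over.  A quick check
with those assumed sizes:

#include <stdio.h>

int main(void)
{
	unsigned long hlist_head_size = 4;	/* i386 pointer size assumed */
	unsigned long mem_32mb = 32UL << 20;
	unsigned long mem_1gb = 1UL << 30;

	printf("32 MB: %lu buckets\n", mem_32mb / 16384 / hlist_head_size);	/* 512 */
	printf("1 GB:  %lu buckets\n", mem_1gb / 16384 / hlist_head_size);	/* 16384 */
	return 0;
}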
@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	int cpu;
 
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 		return NULL;
 
 	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
...
@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
 	if (minfo->cfg.size)
 		size = minfo->cfg.size;
 	else {
-		size = ((num_physpages << PAGE_SHIFT) / 16384) /
+		size = ((totalram_pages << PAGE_SHIFT) / 16384) /
 		       sizeof(struct list_head);
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 			size = 8192;
 		if (size < 16)
 			size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
 	if (minfo->cfg.size) {
 		size = minfo->cfg.size;
 	} else {
-		size = (num_physpages << PAGE_SHIFT) / 16384 /
+		size = (totalram_pages << PAGE_SHIFT) / 16384 /
 		       sizeof(struct list_head);
-		if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
 			size = 8192;
 		if (size < 16)
 			size = 16;
...
@@ -2091,10 +2091,10 @@ static int __init netlink_proto_init(void)
 	if (!nl_table)
 		goto panic;
 
-	if (num_physpages >= (128 * 1024))
-		limit = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		limit = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		limit = num_physpages >> (23 - PAGE_SHIFT);
+		limit = totalram_pages >> (23 - PAGE_SHIFT);
 
 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
 	limit = (1UL << order) / sizeof(struct hlist_head);
...
@@ -1184,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Size and allocate the association hash table.
 	 * The methodology is similar to that of the tcp hash tables.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (22 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		goal = totalram_pages >> (22 - PAGE_SHIFT);
 	else
-		goal = num_physpages >> (24 - PAGE_SHIFT);
+		goal = totalram_pages >> (24 - PAGE_SHIFT);
 
 	for (order = 0; (1UL << order) < goal; order++)
 		;