Commit 0b52663f authored by Baoquan He's avatar Baoquan He Committed by Andrew Morton

mm/mm_init.c: remove arch_reserved_kernel_pages()

Since the current calculation in calc_nr_kernel_pages() already takes
kernel reserved memory into consideration, there is no need to keep
arch_reserved_kernel_pages() any more.

Link: https://lkml.kernel.org/r/20240325145646.1044760-7-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 90e796e2
...@@ -406,9 +406,5 @@ extern void *abatron_pteptrs[2];
#include <asm/nohash/mmu.h>
#endif
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
...@@ -1735,8 +1735,3 @@ static void __init fadump_reserve_crash_area(u64 base)
		memblock_reserve(mstart, msize);
	}
}
/*
 * Report how much memory memblock has reserved (in pages) so that
 * alloc_large_system_hash() can exclude it from its sizing heuristic.
 * With FA-dump a large region is memblock-reserved early, so this can
 * be a substantial fraction of total memory.
 */
unsigned long __init arch_reserved_kernel_pages(void)
{
	unsigned long reserved_bytes = memblock_reserved_size();

	return reserved_bytes / PAGE_SIZE;
}
...@@ -3261,9 +3261,6 @@ static inline void show_mem(void)
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif
extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
...
...@@ -2374,17 +2374,6 @@ void __init page_alloc_init_late(void)
	page_alloc_sysctl_init();
}
#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Default stub for architectures that do not define
 * __HAVE_ARCH_RESERVED_KERNEL_PAGES: the number of pages the arch has
 * reserved but that alloc_large_system_hash() does not know about.
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0UL;
}
#endif /* __HAVE_ARCH_RESERVED_KERNEL_PAGES */
/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
...@@ -2427,7 +2416,6 @@ void *__init alloc_large_system_hash(const char *tablename,
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();
		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SIZE < SZ_1M)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment