Commit 1279aa06 authored by Kefeng Wang, committed by Andrew Morton

mm: make show_free_areas() static

All callers of show_free_areas() pass 0 and NULL, so we can directly use
show_mem() instead of show_free_areas(0, NULL), which allows us to make
show_free_areas() a static function.

Link: https://lkml.kernel.org/r/20230630062253.189440-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 527ed4f7
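
For readers following the change, here is a minimal sketch of the resulting call chain. It assumes the no-argument show_mem() wrapper introduced by the parent commit (527ed4f7) and paraphrases, rather than copies, the definitions in include/linux/mm.h and mm/show_mem.c:

	/* Callers that used to do show_free_areas(0, NULL) now simply call: */
	static inline void show_mem(void)
	{
		/* default filter, no nodemask, dump every zone index */
		__show_mem(0, NULL, MAX_NR_ZONES - 1);
	}

	/*
	 * __show_mem() prints "Mem-Info:" and passes the same filter,
	 * nodemask and max_zone_idx on to show_free_areas(), which this
	 * patch makes static and local to mm/show_mem.c.
	 */

So the former show_free_areas(0, NULL) callers lose nothing: show_mem() ends up invoking show_free_areas() with the same default arguments, preceded by the Mem-Info summary.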
@@ -83,7 +83,7 @@ static void prom_sync_me(void)
 			     "nop\n\t" : : "r" (&trapbase));
 	prom_printf("PROM SYNC COMMAND...\n");
-	show_free_areas(0, NULL);
+	show_mem();
 	if (!is_idle_task(current)) {
 		local_irq_enable();
 		ksys_sync();
...
@@ -2236,18 +2236,6 @@ extern void pagefault_out_of_memory(void);
 #define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
 #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
 
-/*
- * Flags passed to show_mem() and show_free_areas() to suppress output in
- * various contexts.
- */
-#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
-
-extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
-static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
-{
-	__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
-}
-
 /*
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
...
@@ -61,6 +61,12 @@ void page_writeback_init(void);
 #define COMPOUND_MAPPED	0x800000
 #define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)
 
+/*
+ * Flags passed to __show_mem() and show_free_areas() to suppress output in
+ * various contexts.
+ */
+#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
+
 /*
  * How many individual pages have an elevated _mapcount.  Excludes
  * the folio's entire_mapcount.
...
@@ -1003,7 +1003,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 enomem:
 	pr_err("Allocation of length %lu from process %d (%s) failed\n",
 	       len, current->pid, current->comm);
-	show_free_areas(0, NULL);
+	show_mem();
 	return -ENOMEM;
 }
 
@@ -1236,20 +1236,20 @@ unsigned long do_mmap(struct file *file,
 	kmem_cache_free(vm_region_jar, region);
 	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
 		len, current->pid);
-	show_free_areas(0, NULL);
+	show_mem();
 	return -ENOMEM;
 
 error_getting_region:
 	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
 		len, current->pid);
-	show_free_areas(0, NULL);
+	show_mem();
 	return -ENOMEM;
 
 error_vma_iter_prealloc:
 	kmem_cache_free(vm_region_jar, region);
 	vm_area_free(vma);
 	pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
-	show_free_areas(0, NULL);
+	show_mem();
 	return -ENOMEM;
 }
...
@@ -186,7 +186,7 @@ static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
  * cpuset.
  */
-void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
+static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 {
 	unsigned long free_pcp = 0;
 	int cpu, nid;
...
@@ -406,7 +406,7 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 	struct zone *zone;
 
 	printk("Mem-Info:\n");
-	__show_free_areas(filter, nodemask, max_zone_idx);
+	show_free_areas(filter, nodemask, max_zone_idx);
 
 	for_each_populated_zone(zone) {
...