Commit d507e2eb — authored by Johannes Weiner, committed by Linus Torvalds

mm: fix global NR_SLAB_.*CLAIMABLE counter reads

As Tetsuo points out:
 "Commit 385386cf ("mm: vmstat: move slab statistics from zone to
  node counters") broke "Slab:" field of /proc/meminfo . It shows nearly
  0kB"

In addition to /proc/meminfo, this problem also affects the slab
counters OOM/allocation failure info dumps, can cause early -ENOMEM from
overcommit protection, and miscalculate image size requirements during
suspend-to-disk.

This is because the patch in question switched the slab counters from
the zone level to the node level, but forgot to update the global
accessor functions to read the aggregate node data instead of the
aggregate zone data.

Use global_node_page_state() to access the global slab counters.

Fixes: 385386cf ("mm: vmstat: move slab statistics from zone to node counters")
Link: http://lkml.kernel.org/r/20170801134256.5400-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Stefan Agner <stefan@agner.ch>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 26273939
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		   global_node_page_state(NR_FILE_MAPPED));
 	show_val_kb(m, "Shmem:          ", i.sharedram);
 	show_val_kb(m, "Slab:           ",
-		   global_page_state(NR_SLAB_RECLAIMABLE) +
-		   global_page_state(NR_SLAB_UNRECLAIMABLE));
+		   global_node_page_state(NR_SLAB_RECLAIMABLE) +
+		   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 	show_val_kb(m, "SReclaimable:   ",
-		   global_page_state(NR_SLAB_RECLAIMABLE));
+		   global_node_page_state(NR_SLAB_RECLAIMABLE));
 	show_val_kb(m, "SUnreclaim:     ",
-		   global_page_state(NR_SLAB_UNRECLAIMABLE));
+		   global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 	seq_printf(m, "KernelStack:    %8lu kB\n",
		   global_page_state(NR_KERNEL_STACK_KB));
 	show_val_kb(m, "PageTables:     ",
...
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
 {
 	unsigned long size;
 
-	size = global_page_state(NR_SLAB_RECLAIMABLE)
+	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
...
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
 	 * Part of the reclaimable slab consists of items that are in use,
 	 * and cannot be freed. Cap this estimate at the low watermark.
 	 */
-	available += global_page_state(NR_SLAB_RECLAIMABLE) -
-		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
+		     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
+			 wmark_low);
 
 	if (available < 0)
 		available = 0;
...
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state(NR_UNSTABLE_NFS),
-		global_page_state(NR_SLAB_RECLAIMABLE),
-		global_page_state(NR_SLAB_UNRECLAIMABLE),
+		global_node_page_state(NR_SLAB_RECLAIMABLE),
+		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),
...
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
-		free += global_page_state(NR_SLAB_RECLAIMABLE);
+		free += global_node_page_state(NR_SLAB_RECLAIMABLE);
 
		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
...