Commit 7779f212 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: account slab stats per lruvec

Josef's redesign of the balancing between slab caches and the page cache
requires slab cache statistics at the lruvec level.

Link: http://lkml.kernel.org/r/20170530181724.27197-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 00f3ca2c
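
For context, mod_lruvec_page_state() is the helper this patch converts the slab allocators to; it comes from the per-lruvec stats infrastructure added in the parent commit 00f3ca2c. A simplified sketch of its semantics (not the verbatim kernel source): one call updates the node-wide counter and, for pages charged to a cgroup, that cgroup's counters as well, which is what lets the call sites below drop their separate mod_node_page_state()/mod_memcg_page_state() pairs.

/*
 * Simplified sketch of mod_lruvec_page_state(), not the verbatim
 * kernel source: the real helper also maintains the per-node
 * lruvec counters introduced by the parent commit.
 */
static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	/* Always update the node-wide vmstat counter. */
	mod_node_page_state(page_pgdat(page), idx, val);

	/* If the page is charged to a cgroup, update that cgroup's
	 * counters too, so the stats exist at the lruvec level. */
	if (!mem_cgroup_disabled() && page->mem_cgroup)
		mod_memcg_page_state(page, idx, val);
}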
mm/slab.c:

@@ -1425,11 +1425,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		add_node_page_state(page_pgdat(page),
-			NR_SLAB_RECLAIMABLE, nr_pages);
+		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
 	else
-		add_node_page_state(page_pgdat(page),
-			NR_SLAB_UNRECLAIMABLE, nr_pages);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
 
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
@@ -1459,11 +1457,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	kmemcheck_free_shadow(page, order);
 
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		sub_node_page_state(page_pgdat(page),
-				NR_SLAB_RECLAIMABLE, nr_freed);
+		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
 	else
-		sub_node_page_state(page_pgdat(page),
-				NR_SLAB_UNRECLAIMABLE, nr_freed);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
 
 	BUG_ON(!PageSlab(page));
 	__ClearPageSlabPfmemalloc(page);
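One detail worth noting in the kmem_freepages() hunk above: the old add_node_page_state()/sub_node_page_state() pair is replaced by a single modifier that takes a signed delta, so the free path now passes -nr_freed rather than calling a dedicated subtraction helper. A minimal userspace illustration of that signed-delta convention (toy code, not kernel source):

#include <stdio.h>

/* Toy stand-in for a node/lruvec stat counter; not kernel code. */
static long nr_slab_reclaimable;

/* One modifier with a signed delta replaces an add/sub pair. */
static void mod_stat(long *counter, long delta)
{
	*counter += delta;
}

int main(void)
{
	mod_stat(&nr_slab_reclaimable, 1 << 3);    /* allocate an order-3 slab */
	mod_stat(&nr_slab_reclaimable, -(1 << 3)); /* free it again */
	printf("NR_SLAB_RECLAIMABLE: %ld\n", nr_slab_reclaimable); /* prints 0 */
	return 0;
}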
mm/slab.h:

@@ -274,22 +274,11 @@ static __always_inline int memcg_charge_slab(struct page *page,
 					     gfp_t gfp, int order,
 					     struct kmem_cache *s)
 {
-	int ret;
-
 	if (!memcg_kmem_enabled())
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-
-	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
-	if (ret)
-		return ret;
-
-	mod_memcg_page_state(page,
-			     (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			     NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			     1 << order);
-	return 0;
+	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
 }
 
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
@@ -297,11 +286,6 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 					      struct kmem_cache *s)
 {
 	if (!memcg_kmem_enabled())
 		return;
-
-	mod_memcg_page_state(page,
-			     (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			     NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			     -(1 << order));
 	memcg_kmem_uncharge(page, order);
 }
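The mm/slab.h hunks above are where the simplification pays off: memcg_charge_slab() used to charge the cgroup and then bump a memcg-only counter, while the allocator bumped the node counter separately. Since mod_lruvec_page_state() now covers both levels in one call at the allocation site, the charge helpers reduce to pure charging. A hypothetical wrapper (not part of this patch) sketching the resulting allocation-side flow:

/*
 * Hypothetical wrapper, not kernel code: charging and statistics
 * are now two independent steps on the allocation path.
 */
static int charge_and_account_slab(struct page *page, gfp_t gfp,
				   int order, struct kmem_cache *s)
{
	int ret;

	/* Step 1: memcg_charge_slab() only charges the cgroup now. */
	ret = memcg_charge_slab(page, gfp, order, s);
	if (ret)
		return ret;

	/* Step 2: a single call accounts the pages at both the node
	 * and the memcg/lruvec level. */
	mod_lruvec_page_state(page,
			      (s->flags & SLAB_RECLAIM_ACCOUNT) ?
			      NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
			      1 << order);
	return 0;
}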
mm/slub.c:

@@ -1615,7 +1615,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		return NULL;
 
-	mod_node_page_state(page_pgdat(page),
+	mod_lruvec_page_state(page,
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		1 << oo_order(oo));
@@ -1655,7 +1655,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	kmemcheck_free_shadow(page, compound_order(page));
 
-	mod_node_page_state(page_pgdat(page),
+	mod_lruvec_page_state(page,
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 