Commit 28454265 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: implement lruvec stat functions on top of each other

The implementations of the lruvec stat functions, and of their variants for
accounting through a page or from a preemptible context, are mostly
identical and needlessly repetitive.

Implement the lruvec_page functions by looking up the page's lruvec and
then using the lruvec function.
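For illustration, the page variant then boils down to roughly the following
(condensed from the hunk below; the real definition also handles untracked
pages that have no memcg and therefore no lruvec):

	static inline void __mod_lruvec_page_state(struct page *page,
						   enum node_stat_item idx, int val)
	{
		struct lruvec *lruvec;

		/* Resolve the page's lruvec, then reuse the lruvec helper */
		lruvec = mem_cgroup_lruvec(page_pgdat(page), page->mem_cgroup);
		__mod_lruvec_state(lruvec, idx, val);
	}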

Implement the functions for preemptible contexts by disabling preemption
before calling the atomic context functions.
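For illustration, the preemptible variants become thin wrappers, roughly as
in the hunk below:

	static inline void mod_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
	{
		/* Make the per-cpu updates safe, then defer to the atomic variant */
		preempt_disable();
		__mod_lruvec_state(lruvec, idx, val);
		preempt_enable();
	}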

Link: http://lkml.kernel.org/r/20171103153336.24044-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c9019e9b
@@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 {
 	struct mem_cgroup_per_node *pn;
 
+	/* Update node */
 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
 	if (mem_cgroup_disabled())
 		return;
+
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+	/* Update memcg */
 	__mod_memcg_state(pn->memcg, idx, val);
+
+	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stat->count[idx], val);
 }
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-	if (mem_cgroup_disabled())
-		return;
-	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	mod_memcg_state(pn->memcg, idx, val);
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_state(lruvec, idx, val);
+	preempt_enable();
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
 					   enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
+	pg_data_t *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
 
-	__mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
+	/* Untracked pages have no memcg, no lruvec. Update only the node */
+	if (!page->mem_cgroup) {
+		__mod_node_page_state(pgdat, idx, val);
 		return;
-	__mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	__this_cpu_add(pn->lruvec_stat->count[idx], val);
+	}
+
+	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+	__mod_lruvec_state(lruvec, idx, val);
 }
 
 static inline void mod_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
-		return;
-	mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_page_state(page, idx, val);
+	preempt_enable();
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
...