Commit 2cd21c89 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: consolidate lruvec stat flushing

There are two functions that flush the per-cpu data of an lruvec into the
rest of the cgroup tree: one runs when the cgroup is being freed, the other
when a CPU disappears during hotplug.  The only difference is whether all
CPUs or just one is being collected; the rest of the flushing code is the
same.  Merge them into one function and share the common code.

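For illustration, here is a minimal userspace sketch of the consolidated pattern (struct pn_stats, flush_one_cpu(), and the constants below are made-up stand-ins for this example, not the kernel's types): one helper snapshots and clears a single CPU's batched counters, then adds that snapshot at every level of the ancestor chain; a hotplug-style caller would invoke it for the one dead CPU, while a free-style caller would loop over every online CPU.

#include <stdatomic.h>

#define NR_STAT_ITEMS	4	/* stand-in for NR_VM_NODE_STAT_ITEMS */
#define NR_EXAMPLE_CPUS	8	/* example value, not the kernel's NR_CPUS */

/* Simplified stand-in for a per-node stat structure with a parent link. */
struct pn_stats {
	long per_cpu[NR_EXAMPLE_CPUS][NR_STAT_ITEMS];	/* batched per-CPU deltas */
	atomic_long totals[NR_STAT_ITEMS];		/* hierarchical counters */
	struct pn_stats *parent;			/* NULL at the root */
};

/* Fold one CPU's batch into this node and every ancestor above it. */
static void flush_one_cpu(struct pn_stats *pn, int cpu)
{
	long stat[NR_STAT_ITEMS];
	int i;

	/* Snapshot and clear the per-CPU batch once... */
	for (i = 0; i < NR_STAT_ITEMS; i++) {
		stat[i] = pn->per_cpu[cpu][i];
		pn->per_cpu[cpu][i] = 0;
	}

	/* ...then add the same snapshot at each level of the hierarchy. */
	do {
		for (i = 0; i < NR_STAT_ITEMS; i++)
			atomic_fetch_add(&pn->totals[i], stat[i]);
	} while ((pn = pn->parent));
}

/* Hotplug-style caller: only the CPU that went away needs flushing. */
static void flush_dead_cpu(struct pn_stats *pn, int cpu)
{
	flush_one_cpu(pn, cpu);
}

/* Free-style caller: flush every CPU before the node is torn down. */
static void flush_all_cpus(struct pn_stats *pn)
{
	for (int cpu = 0; cpu < NR_EXAMPLE_CPUS; cpu++)
		flush_one_cpu(pn, cpu);
}

Snapshotting into a local array before walking the ancestors is what lets the propagation loop stay identical for both callers.
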
Link: https://lkml.kernel.org/r/20210209163304.77088-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2d146aa3
@@ -2364,39 +2364,39 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
 	mutex_unlock(&percpu_charge_mutex);
 }
 
-static int memcg_hotplug_cpu_dead(unsigned int cpu)
+static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu)
 {
-	struct memcg_stock_pcp *stock;
-	struct mem_cgroup *memcg;
+	int nid;
 
-	stock = &per_cpu(memcg_stock, cpu);
-	drain_stock(stock);
+	for_each_node(nid) {
+		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
+		unsigned long stat[NR_VM_NODE_STAT_ITEMS];
+		struct batched_lruvec_stat *lstatc;
+		int i;
 
-	for_each_mem_cgroup(memcg) {
-		int i;
+		lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
+		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+			stat[i] = lstatc->count[i];
+			lstatc->count[i] = 0;
+		}
 
-		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-			int nid;
+		do {
+			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+				atomic_long_add(stat[i], &pn->lruvec_stat[i]);
+		} while ((pn = parent_nodeinfo(pn, nid)));
+	}
+}
 
-			for_each_node(nid) {
-				struct mem_cgroup_per_node *pn;
-				struct batched_lruvec_stat *lstatc;
-				long x;
-
-				pn = memcg->nodeinfo[nid];
-				lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
-
-				x = lstatc->count[i];
-				lstatc->count[i] = 0;
-
-				if (x) {
-					do {
-						atomic_long_add(x, &pn->lruvec_stat[i]);
-					} while ((pn = parent_nodeinfo(pn, nid)));
-				}
-			}
-		}
-	}
+static int memcg_hotplug_cpu_dead(unsigned int cpu)
+{
+	struct memcg_stock_pcp *stock;
+	struct mem_cgroup *memcg;
+
+	stock = &per_cpu(memcg_stock, cpu);
+	drain_stock(stock);
+
+	for_each_mem_cgroup(memcg)
+		memcg_flush_lruvec_page_state(memcg, cpu);
 
 	return 0;
 }
@@ -3583,27 +3583,6 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 	}
 }
 
-static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg)
-{
-	int node;
-
-	for_each_node(node) {
-		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
-		unsigned long stat[NR_VM_NODE_STAT_ITEMS] = { 0 };
-		struct mem_cgroup_per_node *pi;
-		int cpu, i;
-
-		for_each_online_cpu(cpu)
-			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-				stat[i] += per_cpu(
-					pn->lruvec_stat_cpu->count[i], cpu);
-
-		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
-	}
-}
-
 #ifdef CONFIG_MEMCG_KMEM
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
@@ -5139,12 +5118,15 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
+	int cpu;
+
 	memcg_wb_domain_exit(memcg);
 	/*
 	 * Flush percpu lruvec stats to guarantee the value
 	 * correctness on parent's and all ancestor levels.
 	 */
-	memcg_flush_lruvec_page_state(memcg);
+	for_each_online_cpu(cpu)
+		memcg_flush_lruvec_page_state(memcg, cpu);
 	__mem_cgroup_free(memcg);
 }