Commit d38144b7 authored by Michal Hocko, committed by Linus Torvalds

memcg: unify sync and async per-cpu charge cache draining

Currently we have two ways to drain the per-CPU caches for charges:
drain_all_stock_sync synchronously drains all caches, while
drain_all_stock_async asynchronously drains only those that refer to a
given memory cgroup or its subtree in the hierarchy.  Targeted async
draining was introduced by commit 26fe6168 (memcg: fix percpu cached
charge draining frequency) to reduce the number of CPU workers.

Sync draining is currently triggered only from mem_cgroup_force_empty,
which is invoked either from userspace (mem_cgroup_force_empty_write)
or when a cgroup is removed (mem_cgroup_pre_destroy).  Although these
are not usually frequent operations, it still makes sense to do
targeted draining here as well, especially if the box has many CPUs.

This patch unifies both methods into a single function
(drain_all_stock), which relies on the original async implementation
and, for the sync mode, simply adds flush_work to wait on all caches
that are still being drained.  The FLUSHING_CACHED_CHARGE bit check
prevents us from waiting on work that we have not triggered.  Please
note that both the sync and async functions are protected by
percpu_charge_mutex, so we cannot race with other drainers.
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d1a05b69
@@ -2154,19 +2154,14 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 }
 
 /*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back to res_counter later but cannot wait for
- * it.
+ * Drains all per-CPU charge caches for given root_mem resp. subtree
+ * of the hierarchy under it. sync flag says whether we should block
+ * until the work is done.
  */
-static void drain_all_stock_async(struct mem_cgroup *root_mem)
+static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 {
         int cpu, curcpu;
-        /*
-         * If someone calls draining, avoid adding more kworker runs.
-         */
-        if (!mutex_trylock(&percpu_charge_mutex))
-                return;
+
         /* Notify other cpus that system-wide "drain" is running */
         get_online_cpus();
         /*
@@ -2197,17 +2192,42 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
                         schedule_work_on(cpu, &stock->work);
                 }
         }
+
+        if (!sync)
+                goto out;
+
+        for_each_online_cpu(cpu) {
+                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+                if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+                        flush_work(&stock->work);
+        }
+out:
         put_online_cpus();
+}
+
+/*
+ * Tries to drain stocked charges in other cpus. This function is asynchronous
+ * and just put a work per cpu for draining localy on each cpu. Caller can
+ * expects some charges will be back to res_counter later but cannot wait for
+ * it.
+ */
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
+{
+        /*
+         * If someone calls draining, avoid adding more kworker runs.
+         */
+        if (!mutex_trylock(&percpu_charge_mutex))
+                return;
+        drain_all_stock(root_mem, false);
         mutex_unlock(&percpu_charge_mutex);
+        /* We don't wait for flush_work */
 }
 
 /* This is a synchronous drain interface. */
-static void drain_all_stock_sync(void)
+static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
         /* called when force_empty is called */
         mutex_lock(&percpu_charge_mutex);
-        schedule_on_each_cpu(drain_local_stock);
+        drain_all_stock(root_mem, true);
         mutex_unlock(&percpu_charge_mutex);
 }
@@ -3856,7 +3876,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
                 goto out;
         /* This is for making all *used* pages to be on LRU. */
         lru_add_drain_all();
-        drain_all_stock_sync();
+        drain_all_stock_sync(mem);
         ret = 0;
         mem_cgroup_start_move(mem);
         for_each_node_state(node, N_HIGH_MEMORY) {
...