Commit 4050377b authored by Li Zefan, committed by Linus Torvalds

memcg: use css_get/put for swap memcg

Use css_get/put instead of mem_cgroup_get/put.  A simple replacement
will do.

The historical reason that memcg has its own refcnt instead of always
using css_get/put is that a cgroup couldn't be removed while there were
still css refs, so css refs couldn't serve as long-lived references.
The situation has changed: rmdir of a cgroup now succeeds regardless of
css refs, but the cgroup won't be freed until its css refcount drops to 0.
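
To make that lifetime rule concrete, here is a minimal userspace sketch; it is not kernel code, and the fake_css_* names are invented for illustration. It only models the semantics the patch relies on: removal ("rmdir") succeeds while references are still held, and the structure is freed only when the last reference is dropped, so a css reference can now safely outlive rmdir.

/*
 * Minimal userspace model (NOT kernel code) of the lifetime rule above.
 * The fake_css_* names are made up for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_css {
	atomic_int refcnt;	/* starts at 1 for the "online" reference */
	bool online;
};

static struct fake_css *fake_css_alloc(void)
{
	struct fake_css *css = calloc(1, sizeof(*css));

	atomic_init(&css->refcnt, 1);
	css->online = true;
	return css;
}

static void fake_css_get(struct fake_css *css)
{
	atomic_fetch_add(&css->refcnt, 1);
}

static void fake_css_put(struct fake_css *css)
{
	/* Each put drops exactly one reference; the object is freed at zero.
	 * This is also why releasing mc.moved_swap references becomes a loop
	 * of css_put() calls in __mem_cgroup_clear_mc() below. */
	if (atomic_fetch_sub(&css->refcnt, 1) == 1) {
		printf("last reference dropped: freeing\n");
		free(css);
	}
}

/* "rmdir": mark the group offline and drop its own reference, don't wait. */
static void fake_css_offline(struct fake_css *css)
{
	css->online = false;
	fake_css_put(css);
}

int main(void)
{
	struct fake_css *css = fake_css_alloc();

	fake_css_get(css);	/* long-lived ref, e.g. held for a swap record */
	fake_css_offline(css);	/* "rmdir" succeeds immediately */
	/* the group is offline but still safe to use via our reference */
	fake_css_put(css);	/* now it is actually freed */
	return 0;
}
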
Signed-off-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Glauber Costa <glommer@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 10d5ebf4

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4231,12 +4231,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
 	unlock_page_cgroup(pc);
 	/*
 	 * even after unlock, we have memcg->res.usage here and this memcg
-	 * will never be freed.
+	 * will never be freed, so it's safe to call css_get().
 	 */
 	memcg_check_events(memcg, page);
 	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
 		mem_cgroup_swap_statistics(memcg, true);
-		mem_cgroup_get(memcg);
+		css_get(&memcg->css);
 	}
 	/*
 	 * Migration does not charge the res_counter for the
@@ -4348,7 +4348,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 
 	/*
 	 * record memcg information, if swapout && memcg != NULL,
-	 * mem_cgroup_get() was called in uncharge().
+	 * css_get() was called in uncharge().
 	 */
 	if (do_swap_account && swapout && memcg)
 		swap_cgroup_record(ent, css_id(&memcg->css));
@@ -4379,7 +4379,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
 		if (!mem_cgroup_is_root(memcg))
 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 		mem_cgroup_swap_statistics(memcg, false);
-		mem_cgroup_put(memcg);
+		css_put(&memcg->css);
 	}
 	rcu_read_unlock();
 }
@@ -4413,11 +4413,14 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
 		 * This function is only called from task migration context now.
 		 * It postpones res_counter and refcount handling till the end
 		 * of task migration(mem_cgroup_clear_mc()) for performance
-		 * improvement. But we cannot postpone mem_cgroup_get(to)
-		 * because if the process that has been moved to @to does
-		 * swap-in, the refcount of @to might be decreased to 0.
+		 * improvement. But we cannot postpone css_get(to) because if
+		 * the process that has been moved to @to does swap-in, the
+		 * refcount of @to might be decreased to 0.
+		 *
+		 * We are in attach() phase, so the cgroup is guaranteed to be
+		 * alive, so we can just call css_get().
 		 */
-		mem_cgroup_get(to);
+		css_get(&to->css);
 		return 0;
 	}
 	return -EINVAL;
@@ -6718,6 +6721,7 @@ static void __mem_cgroup_clear_mc(void)
 {
 	struct mem_cgroup *from = mc.from;
 	struct mem_cgroup *to = mc.to;
+	int i;
 
 	/* we must uncharge all the leftover precharges from mc.to */
 	if (mc.precharge) {
@@ -6738,7 +6742,9 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.from))
 			res_counter_uncharge(&mc.from->memsw,
 					     PAGE_SIZE * mc.moved_swap);
-		__mem_cgroup_put(mc.from, mc.moved_swap);
+
+		for (i = 0; i < mc.moved_swap; i++)
+			css_put(&mc.from->css);
 
 		if (!mem_cgroup_is_root(mc.to)) {
 			/*
@@ -6748,7 +6754,7 @@ static void __mem_cgroup_clear_mc(void)
 			res_counter_uncharge(&mc.to->res,
 					     PAGE_SIZE * mc.moved_swap);
 		}
-		/* we've already done mem_cgroup_get(mc.to) */
+		/* we've already done css_get(mc.to) */
 		mc.moved_swap = 0;
 	}
 	memcg_oom_recover(from);