Commit b6b6cc72 authored by Michal Hocko, committed by Linus Torvalds

memcg: do not replicate get_mem_cgroup_from_mm in __mem_cgroup_try_charge

__mem_cgroup_try_charge duplicates get_mem_cgroup_from_mm for charges
which come without a memcg.  The only reason seems to be a tiny
optimization: css_tryget is not called if the charge can be consumed
from the per-cpu stock.  Nevertheless, css_tryget is very cheap since
it has been reworked to use per-cpu counting, so this optimization
doesn't give us anything these days.
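
For reference, get_mem_cgroup_from_mm(), added by the parent commit, does
essentially what the open-coded block removed below does: dereference
mm->owner under RCU, fall back to root_mem_cgroup, and pin the result with
css_tryget().  Roughly (a paraphrased sketch, not a verbatim quote of the
tree):

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		/* No mm (e.g. kernel-context page cache IO): charge the root group. */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			/* mm->owner can be NULL, e.g. when racing with swapoff. */
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
		/* Retry the lookup if the group is concurrently being destroyed. */
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();

	return memcg;
}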

So let's drop the code duplication so that the code is more readable.
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df381975
@@ -2697,52 +2697,14 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 again:
 	if (*ptr) { /* css should be a valid one */
 		memcg = *ptr;
-		if (mem_cgroup_is_root(memcg))
-			goto done;
-		if (consume_stock(memcg, nr_pages))
-			goto done;
 		css_get(&memcg->css);
 	} else {
-		struct task_struct *p;
-
-		rcu_read_lock();
-		p = rcu_dereference(mm->owner);
-		/*
-		 * Because we don't have task_lock(), "p" can exit.
-		 * In that case, "memcg" can point to root or p can be NULL with
-		 * race with swapoff. Then, we have small risk of mis-accouning.
-		 * But such kind of mis-account by race always happens because
-		 * we don't have cgroup_mutex(). It's overkill and we allo that
-		 * small race, here.
-		 * (*) swapoff at el will charge against mm-struct not against
-		 * task-struct. So, mm->owner can be NULL.
-		 */
-		memcg = mem_cgroup_from_task(p);
-		if (!memcg)
-			memcg = root_mem_cgroup;
-		if (mem_cgroup_is_root(memcg)) {
-			rcu_read_unlock();
-			goto done;
-		}
-		if (consume_stock(memcg, nr_pages)) {
-			/*
-			 * It seems dagerous to access memcg without css_get().
-			 * But considering how consume_stok works, it's not
-			 * necessary. If consume_stock success, some charges
-			 * from this memcg are cached on this cpu. So, we
-			 * don't need to call css_get()/css_tryget() before
-			 * calling consume_stock().
-			 */
-			rcu_read_unlock();
-			goto done;
-		}
-		/* after here, we may be blocked. we need to get refcnt */
-		if (!css_tryget(&memcg->css)) {
-			rcu_read_unlock();
-			goto again;
-		}
-		rcu_read_unlock();
+		memcg = get_mem_cgroup_from_mm(mm);
 	}
+	if (mem_cgroup_is_root(memcg))
+		goto done;
+	if (consume_stock(memcg, nr_pages))
+		goto done;
 
 	do {
 		bool invoke_oom = oom && !nr_oom_retries;
@@ -2778,8 +2740,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
-	css_put(&memcg->css);
 done:
+	css_put(&memcg->css);
 	*ptr = memcg;
 	return 0;
 nomem:
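
Putting the two hunks together, the entry and the successful exit of
__mem_cgroup_try_charge() now read roughly as follows (a condensed sketch
assembled from the hunks above; the charge, reclaim and OOM retry loop in
between is elided).  Note that css_put() moves under the done: label
because every path now holds a css reference (either from css_get() on
*ptr or from the css_tryget() inside get_mem_cgroup_from_mm()), including
the root-cgroup and per-cpu stock shortcuts that previously jumped to
done: without one:

again:
	if (*ptr) { /* css should be a valid one */
		memcg = *ptr;
		css_get(&memcg->css);
	} else {
		memcg = get_mem_cgroup_from_mm(mm);
	}
	if (mem_cgroup_is_root(memcg))
		goto done;
	if (consume_stock(memcg, nr_pages))
		goto done;

	/* ... charge attempt, reclaim and OOM handling elided ... */

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
done:
	css_put(&memcg->css);
	*ptr = memcg;
	return 0;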