Commit 9476db97 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: simplify move precharge function

The move precharge function does some baroque things: it tries raw
res_counter charging of the entire amount first, and then falls back to
a loop of one-by-one charges, with checks for pending signals and
cond_resched() batching.

Just use mem_cgroup_try_charge() without __GFP_WAIT for the first bulk
charge attempt.  In the one-by-one loop, remove the signal check (this
is already checked in try_charge), and simply call cond_resched() after
every charge - it's not that expensive.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0029e19e
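
For orientation before the raw diff, here is a condensed sketch of what mem_cgroup_do_precharge() looks like after this change. It is reconstructed from the hunk below rather than quoted verbatim from mm/memcontrol.c, so treat it as illustrative only; the identifiers used (mem_cgroup_try_charge(), mc.to, mc.precharge, cond_resched()) are the ones that appear in the diff.

/* Sketch: precharge 'count' pages against the destination memcg (mc.to). */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;

	if (mem_cgroup_is_root(mc.to)) {
		/* the root cgroup is not accounted; just note the precharge */
		mc.precharge += count;
		return ret;
	}

	/*
	 * One bulk attempt first: masking out __GFP_WAIT forbids reclaim,
	 * so this either succeeds cheaply or fails without blocking.
	 */
	ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/*
	 * Fall back to one page at a time, with reclaim allowed.  No
	 * signal_pending() check here: mem_cgroup_try_charge() already
	 * bails out on fatal signals.
	 */
	while (count--) {
		ret = mem_cgroup_try_charge(mc.to,
					    GFP_KERNEL & ~__GFP_NORETRY, 1);
		if (ret)
			return ret;	/* mem_cgroup_clear_mc() drops partial charges */
		mc.precharge++;
		cond_resched();		/* cheap enough to do every iteration */
	}
	return 0;
}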
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6385,56 +6385,38 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
 #ifdef CONFIG_MMU
 /* Handlers for move charge at task migration. */
-#define PRECHARGE_COUNT_AT_ONCE	256
 static int mem_cgroup_do_precharge(unsigned long count)
 {
 	int ret = 0;
-	int batch_count = PRECHARGE_COUNT_AT_ONCE;
-	struct mem_cgroup *memcg = mc.to;
 
-	if (mem_cgroup_is_root(memcg)) {
+	if (mem_cgroup_is_root(mc.to)) {
 		mc.precharge += count;
 		/* we don't need css_get for root */
 		return ret;
 	}
-	/* try to charge at once */
-	if (count > 1) {
-		struct res_counter *dummy;
-		/*
-		 * "memcg" cannot be under rmdir() because we've already checked
-		 * by cgroup_lock_live_cgroup() that it is not removed and we
-		 * are still under the same cgroup_mutex. So we can postpone
-		 * css_get().
-		 */
-		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
-			goto one_by_one;
-		if (do_swap_account && res_counter_charge(&memcg->memsw,
-						PAGE_SIZE * count, &dummy)) {
-			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
-			goto one_by_one;
-		}
+
+	/* Try a single bulk charge without reclaim first */
+	ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
+	if (!ret) {
 		mc.precharge += count;
 		return ret;
 	}
 
-one_by_one:
-	/* fall back to one by one charge */
+	/* Try charges one by one with reclaim */
 	while (count--) {
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-		if (!batch_count--) {
-			batch_count = PRECHARGE_COUNT_AT_ONCE;
-			cond_resched();
-		}
-		ret = mem_cgroup_try_charge(memcg,
+		ret = mem_cgroup_try_charge(mc.to,
 					    GFP_KERNEL & ~__GFP_NORETRY, 1);
+		/*
+		 * In case of failure, any residual charges against
+		 * mc.to will be dropped by mem_cgroup_clear_mc()
+		 * later on.
+		 */
 		if (ret)
-			/* mem_cgroup_clear_mc() will do uncharge later */
 			return ret;
 		mc.precharge++;
+		cond_resched();
 	}
-	return ret;
+	return 0;
 }
 
 /**