Commit b2402857 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: remove PCG_CACHE page_cgroup flag

We record 'the page is cache' with the PCG_CACHE bit in page_cgroup.
Here, the pages that are *not* "CACHE" are mapped anonymous user pages (and
their SwapCache); shmem is not among them and is accounted as CACHE even
when it sits on the anon LRU.

Looking at the callers, at charge/uncharge time the caller already knows
what kind of page it is dealing with, so we don't need to record that with
one bit per page.

This patch removes the PCG_CACHE bit and makes callers of
mem_cgroup_charge_statistics() specify what the page is.
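
For illustration, this is the new call pattern in the commit-charge path (a
minimal sketch mirroring the __mem_cgroup_commit_charge hunk below; the
charge type alone tells us whether the page is anon):

    if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
            anon = true;
    else
            anon = false;

    mem_cgroup_charge_statistics(memcg, anon, nr_pages);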

About page migration: the mapping of the used page is not touched during
migration (see page_remove_rmap), so we can rely on it and push the correct
charge type down to __mem_cgroup_uncharge_common from end_migration for the
unused page.  The force flag was misleading; it was only abused to skip the
needless page_mapped() / PageCgroupMigration() check, since we know the
unused page is no longer mapped and cleared the migration flag just a few
lines up.  But doing the checks is no biggie and it's not worth adding
another flag just to skip them.
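
A minimal sketch of what end_migration now does for the unused page
(mirroring the mem_cgroup_end_migration hunk below; the used page's
anon-ness selects the charge type because its mapping is stable during
migration):

    anon = PageAnon(used);
    __mem_cgroup_uncharge_common(unused,
            anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
                 : MEM_CGROUP_CHARGE_TYPE_CACHE);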

[akpm@linux-foundation.org: checkpatch fixes]
[hughd@google.com: fix PageAnon uncharging]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ying Han <yinghan@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ca464d69
@@ -4,7 +4,6 @@
 enum {
         /* flags for mem_cgroup */
         PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */
-        PCG_CACHE, /* charged as cache */
         PCG_USED, /* this object is in use. */
         PCG_MIGRATION, /* under page migration */
         /* flags for mem_cgroup and file and I/O status */
@@ -64,11 +63,6 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
 static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
         { return test_and_clear_bit(PCG_##lname, &pc->flags); }
 
-/* Cache flag is set only once (at allocation) */
-TESTPCGFLAG(Cache, CACHE)
-CLEARPCGFLAG(Cache, CACHE)
-SETPCGFLAG(Cache, CACHE)
-
 TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
 SETPCGFLAG(Used, USED)
@@ -85,7 +79,7 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
         /*
          * Don't take this lock in IRQ context.
-         * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+         * This lock is for pc->mem_cgroup, USED, MIGRATION
          */
         bit_spin_lock(PCG_LOCK, &pc->flags);
 }
@@ -690,15 +690,19 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
-                                         bool file, int nr_pages)
+                                         bool anon, int nr_pages)
 {
         preempt_disable();
 
-        if (file)
-                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+        /*
+         * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
+         * counted as CACHE even if it's on ANON LRU.
+         */
+        if (anon)
+                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
                                 nr_pages);
         else
-                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
                                 nr_pages);
 
         /* pagein of a big page is an event. So, ignore page size */
@@ -2442,6 +2446,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
         struct zone *uninitialized_var(zone);
         bool was_on_lru = false;
+        bool anon;
 
         lock_page_cgroup(pc);
         if (unlikely(PageCgroupUsed(pc))) {
@@ -2477,19 +2482,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
          * See mem_cgroup_add_lru_list(), etc.
          */
         smp_wmb();
-        switch (ctype) {
-        case MEM_CGROUP_CHARGE_TYPE_CACHE:
-        case MEM_CGROUP_CHARGE_TYPE_SHMEM:
-                SetPageCgroupCache(pc);
-                SetPageCgroupUsed(pc);
-                break;
-        case MEM_CGROUP_CHARGE_TYPE_MAPPED:
-                ClearPageCgroupCache(pc);
-                SetPageCgroupUsed(pc);
-                break;
-        default:
-                break;
-        }
+        SetPageCgroupUsed(pc);
 
         if (lrucare) {
                 if (was_on_lru) {
@@ -2500,7 +2493,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                 spin_unlock_irq(&zone->lru_lock);
         }
 
-        mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
+        if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+                anon = true;
+        else
+                anon = false;
+
+        mem_cgroup_charge_statistics(memcg, anon, nr_pages);
         unlock_page_cgroup(pc);
 
         /*
@@ -2565,6 +2563,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
         unsigned long flags;
         int ret;
+        bool anon = PageAnon(page);
 
         VM_BUG_ON(from == to);
         VM_BUG_ON(PageLRU(page));
@@ -2593,14 +2592,14 @@ static int mem_cgroup_move_account(struct page *page,
                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
                 preempt_enable();
         }
-        mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
+        mem_cgroup_charge_statistics(from, anon, -nr_pages);
         if (uncharge)
                 /* This is not "cancel", but cancel_charge does all we need. */
                 __mem_cgroup_cancel_charge(from, nr_pages);
 
         /* caller should have done css_get */
         pc->mem_cgroup = to;
-        mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
+        mem_cgroup_charge_statistics(to, anon, nr_pages);
         /*
          * We charges against "to" which may not have any tasks. Then, "to"
          * can be under rmdir(). But in current implementation, caller of
@@ -2921,6 +2920,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         struct mem_cgroup *memcg = NULL;
         unsigned int nr_pages = 1;
         struct page_cgroup *pc;
+        bool anon;
 
         if (mem_cgroup_disabled())
                 return NULL;
@@ -2946,8 +2946,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         if (!PageCgroupUsed(pc))
                 goto unlock_out;
 
+        anon = PageAnon(page);
+
         switch (ctype) {
         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+                anon = true;
+                /* fallthrough */
         case MEM_CGROUP_CHARGE_TYPE_DROP:
                 /* See mem_cgroup_prepare_migration() */
                 if (page_mapped(page) || PageCgroupMigration(pc))
@@ -2964,7 +2968,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                 break;
         }
 
-        mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
+        mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
 
         ClearPageCgroupUsed(pc);
         /*
@@ -3271,6 +3275,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 {
         struct page *used, *unused;
         struct page_cgroup *pc;
+        bool anon;
 
         if (!memcg)
                 return;
@@ -3292,8 +3297,10 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
         lock_page_cgroup(pc);
         ClearPageCgroupMigration(pc);
         unlock_page_cgroup(pc);
-        __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
+        anon = PageAnon(used);
+        __mem_cgroup_uncharge_common(unused,
+                anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
+                     : MEM_CGROUP_CHARGE_TYPE_CACHE);
 
         /*
          * If a page is a file cache, radix-tree replacement is very atomic
@@ -3303,7 +3310,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
          * and USED bit check in mem_cgroup_uncharge_page() will do enough
          * check. (see prepare_charge() also)
          */
-        if (PageAnon(used))
+        if (anon)
                 mem_cgroup_uncharge_page(used);
         /*
          * At migration, we may charge account against cgroup which has no
@@ -3333,7 +3340,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
         /* fix accounting on old pages */
         lock_page_cgroup(pc);
         memcg = pc->mem_cgroup;
-        mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+        mem_cgroup_charge_statistics(memcg, false, -1);
         ClearPageCgroupUsed(pc);
         unlock_page_cgroup(pc);