Commit 9b3c0a07 authored by Hirokazu Takahashi, committed by Linus Torvalds

memcg: simplify force_empty and move_lists

As for force_empty, though this may not be the main topic here,
mem_cgroup_force_empty_list() can be implemented more simply: instead of
releasing the page_cgroups itself, the function can just call
mem_cgroup_uncharge_page().  The trick is to call get_page() before
invoking mem_cgroup_uncharge_page(), so that the page cannot be released
while this function runs.
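
Condensed from the force_empty hunk below, the new loop is just this (a
sketch, not a standalone build; pc, page, list, mz, count and flags are
the function's locals):

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);		/* pin the page across the unlock */
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		mem_cgroup_uncharge_page(page);	/* does the uncharge for us */
		put_page(page);		/* drop our pin */
		if (--count <= 0) {	/* be nice on very long lists */
			count = FORCE_UNCHARGE_BATCH;
			cond_resched();
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);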

Kamezawa-san points out that by the time mem_cgroup_uncharge_page()
uncharges, the page might have been reassigned to the lru of a different
mem_cgroup, and is now being emptied from that one; but Hugh argues that
is okay: the end state is the same as if the page had never moved to
another list.

And once force_empty stops taking lock_page_cgroup within mz->lru_lock,
mem_cgroup_move_lists() can be simplified to take mz->lru_lock directly
while holding the page_cgroup lock (though it still has to use
try_lock_page_cgroup), as sketched below.
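
The resulting locking shape in mem_cgroup_move_lists() (again a sketch
condensed from the hunk below):

	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		/* mz->lru_lock now nests inside the page_cgroup lock */
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
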
Signed-off-by: Hirokazu Takahashi <taka@valinux.co.jp>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2680eed7
@@ -353,7 +353,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 void mem_cgroup_move_lists(struct page *page, bool active)
 {
 	struct page_cgroup *pc;
-	struct mem_cgroup *mem;
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
@@ -367,35 +366,14 @@ void mem_cgroup_move_lists(struct page *page, bool active)
 	if (!try_lock_page_cgroup(page))
 		return;
 
-	/*
-	 * Now page_cgroup is stable, but we cannot acquire mz->lru_lock
-	 * while holding it, because mem_cgroup_force_empty_list does the
-	 * reverse.  Get a hold on the mem_cgroup before unlocking, so that
-	 * the zoneinfo remains stable, then take mz->lru_lock; then check
-	 * that page still points to pc and pc (even if freed and reassigned
-	 * to that same page meanwhile) still points to the same mem_cgroup.
-	 * Then we know mz still points to the right spinlock, so it's safe
-	 * to move_lists (page->page_cgroup might be reset while we do so, but
-	 * that doesn't matter: pc->page is stable till we drop mz->lru_lock).
-	 * We're being a little naughty not to try_lock_page_cgroup again
-	 * inside there, but we are safe, aren't we?  Aren't we?  Whistle...
-	 */
 	pc = page_get_page_cgroup(page);
 	if (pc) {
-		mem = pc->mem_cgroup;
 		mz = page_cgroup_zoneinfo(pc);
-		css_get(&mem->css);
-		unlock_page_cgroup(page);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		if (page_get_page_cgroup(page) == pc && pc->mem_cgroup == mem)
-			__mem_cgroup_move_lists(pc, active);
+		__mem_cgroup_move_lists(pc, active);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
-		css_put(&mem->css);
-	} else
-		unlock_page_cgroup(page);
+	}
+	unlock_page_cgroup(page);
 }
 
 /*
@@ -789,7 +767,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 {
 	struct page_cgroup *pc;
 	struct page *page;
-	int count;
+	int count = FORCE_UNCHARGE_BATCH;
 	unsigned long flags;
 	struct list_head *list;
 
@@ -798,35 +776,21 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 	else
 		list = &mz->inactive_list;
 
-	if (list_empty(list))
-		return;
-retry:
-	count = FORCE_UNCHARGE_BATCH;
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	while (--count && !list_empty(list)) {
+	while (!list_empty(list)) {
 		pc = list_entry(list->prev, struct page_cgroup, lru);
 		page = pc->page;
-		lock_page_cgroup(page);
-		if (page_get_page_cgroup(page) == pc) {
-			page_assign_page_cgroup(page, NULL);
-			unlock_page_cgroup(page);
-			__mem_cgroup_remove_list(pc);
-			res_counter_uncharge(&mem->res, PAGE_SIZE);
-			css_put(&mem->css);
-			kfree(pc);
-		} else {
-			/* racing uncharge: let page go then retry */
-			unlock_page_cgroup(page);
-			break;
-		}
+		get_page(page);
+		spin_unlock_irqrestore(&mz->lru_lock, flags);
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		if (--count <= 0) {
+			count = FORCE_UNCHARGE_BATCH;
+			cond_resched();
+		}
+		spin_lock_irqsave(&mz->lru_lock, flags);
 	}
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 }
 
 /*