Commit 4c4a2214 authored by David Rientjes, committed by Linus Torvalds

memcontrol: move oom task exclusion to tasklist scan

Creates a helper function to return non-zero if a task is a member of a
memory controller:

	int task_in_mem_cgroup(const struct task_struct *task,
			       const struct mem_cgroup *mem);

When the OOM killer is constrained by the memory controller, the exclusion
of tasks that are not members of that controller was previously misplaced:
it lived in the badness scoring function.  Those tasks should instead be
excluded during the tasklist scan in select_bad_process().
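
In reduced form, the resulting tasklist scan looks roughly like this (a
sketch of the hunks below with the scoring logic elided, not the verbatim
diff; "mem" is the constraining memory controller, or NULL for a global OOM):

	do_each_thread(g, p) {
		/* never select the init task */
		if (is_global_init(p))
			continue;
		/* skip tasks outside the constraining memory controller */
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		/* ... badness() scoring continues as before ... */
	} while_each_thread(g, p);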

[akpm@linux-foundation.org: build fix]
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4c6bc8dd
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -48,6 +48,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
 static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
 {
@@ -110,6 +111,12 @@ static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
 	return NULL;
 }
 
+static inline int task_in_mem_cgroup(struct task_struct *task,
+				     const struct mem_cgroup *mem)
+{
+	return 1;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -170,6 +170,16 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
 }
 
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+{
+	int ret;
+
+	task_lock(task);
+	ret = task->mm && mm_cgroup(task->mm) == mem;
+	task_unlock(task);
+	return ret;
+}
+
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -65,13 +65,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime,
 		return 0;
 	}
 
-#ifdef CONFIG_CGROUP_MEM_CONT
-	if (mem != NULL && mm->mem_cgroup != mem) {
-		task_unlock(p);
-		return 0;
-	}
-#endif
-
 	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
@@ -223,6 +216,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
 		/* skip the init task */
 		if (is_global_init(p))
 			continue;
+		if (mem && !task_in_mem_cgroup(p, mem))
+			continue;
 
 		/*
 		 * This task already has access to memory reserves and is
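
Two implementation notes, both visible in the hunks above: task_in_mem_cgroup() takes task_lock() so that task->mm is stable across the mm_cgroup() lookup while a concurrent exit or exec could otherwise change it, and the !CONFIG_CGROUP_MEM_CONT stub simply returns 1, so with the memory controller compiled out the new check in select_bad_process() excludes nothing and the scan behaves exactly as before.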