Commit 5eee7e1c authored by Shakeel Butt, committed by Linus Torvalds

mm, oom: refactor dump_tasks for memcg OOMs

dump_tasks() traverses all existing processes even in the memcg OOM
context, which is not only unnecessary but also wasteful.  It imposes a
long RCU critical section even from a contained context, which can be quite
disruptive.

Change dump_tasks() to be aligned with select_bad_process() and use
mem_cgroup_scan_tasks() to selectively traverse only the processes of the
target memcg hierarchy during a memcg OOM.
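
For reference, mem_cgroup_scan_tasks() walks the processes attached to the
given memcg hierarchy and invokes a callback on each, stopping early if the
callback returns nonzero; that is why dump_task() below always returns 0, so
every task gets printed.  A minimal sketch of the iterator's interface (the
real definition lives in mm/memcontrol.c):

	/*
	 * Calls fn(task, arg) for each process in @memcg's hierarchy.
	 * Stops early if fn returns nonzero and returns that value,
	 * otherwise returns 0.
	 */
	int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
				  int (*fn)(struct task_struct *, void *),
				  void *arg);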

Link: http://lkml.kernel.org/r/20190617231207.160865-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f168a9a5
mm/oom_kill.c
@@ -382,10 +382,38 @@ static void select_bad_process(struct oom_control *oc)
 		oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
 }
 
+static int dump_task(struct task_struct *p, void *arg)
+{
+	struct oom_control *oc = arg;
+	struct task_struct *task;
+
+	if (oom_unkillable_task(p, NULL, oc->nodemask))
+		return 0;
+
+	task = find_lock_task_mm(p);
+	if (!task) {
+		/*
+		 * This is a kthread or all of p's threads have already
+		 * detached their mm's. There's no need to report
+		 * them; they can't be oom killed anyway.
+		 */
+		return 0;
+	}
+
+	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+		task->pid, from_kuid(&init_user_ns, task_uid(task)),
+		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
+		mm_pgtables_bytes(task->mm),
+		get_mm_counter(task->mm, MM_SWAPENTS),
+		task->signal->oom_score_adj, task->comm);
+	task_unlock(task);
+
+	return 0;
+}
+
 /**
  * dump_tasks - dump current memory state of all system tasks
- * @memcg: current's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
  *
  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
@@ -393,37 +421,21 @@ static void select_bad_process(struct oom_control *oc)
  * State information includes task's pid, uid, tgid, vm size, rss,
  * pgtables_bytes, swapents, oom_score_adj value, and name.
  */
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_tasks(struct oom_control *oc)
 {
-	struct task_struct *p;
-	struct task_struct *task;
-
 	pr_info("Tasks state (memory values in pages):\n");
 	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
-	rcu_read_lock();
-	for_each_process(p) {
-		if (oom_unkillable_task(p, memcg, nodemask))
-			continue;
 
-		task = find_lock_task_mm(p);
-		if (!task) {
-			/*
-			 * This is a kthread or all of p's threads have already
-			 * detached their mm's. There's no need to report
-			 * them; they can't be oom killed anyway.
-			 */
-			continue;
-		}
+	if (is_memcg_oom(oc))
+		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+	else {
+		struct task_struct *p;
 
-		pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
-			task->pid, from_kuid(&init_user_ns, task_uid(task)),
-			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-			mm_pgtables_bytes(task->mm),
-			get_mm_counter(task->mm, MM_SWAPENTS),
-			task->signal->oom_score_adj, task->comm);
-		task_unlock(task);
+		rcu_read_lock();
+		for_each_process(p)
+			dump_task(p, oc);
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
 }
 
 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
@@ -455,7 +467,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
 		dump_unreclaimable_slab();
 	}
 	if (sysctl_oom_dump_tasks)
-		dump_tasks(oc->memcg, oc->nodemask);
+		dump_tasks(oc);
 	if (p)
 		dump_oom_summary(oc, p);
 }