Commit b564daf8 authored by Oleg Nesterov's avatar Oleg Nesterov Committed by Linus Torvalds

coredump: construct the list of coredumping threads at startup time

binfmt->core_dump() has to iterate over all threads in the system in order
to find the coredumping threads and construct the list, using
GFP_ATOMIC allocations.

With this patch each thread allocates the list node on exit_mm()'s stack and
adds itself to the list.

This allows us to do further changes:

	- simplify ->core_dump()

	- change exit_mm() to clear ->mm first, then wait for ->core_done.
	  this makes the coredumping process visible to oom_kill

	- kill mm->core_done
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9d5b327b
...@@ -1604,6 +1604,8 @@ static int coredump_wait(int exit_code, struct core_state *core_state) ...@@ -1604,6 +1604,8 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
init_completion(&mm->core_done); init_completion(&mm->core_done);
init_completion(&core_state->startup); init_completion(&core_state->startup);
core_state->dumper.task = tsk;
core_state->dumper.next = NULL;
core_waiters = zap_threads(tsk, mm, core_state, exit_code); core_waiters = zap_threads(tsk, mm, core_state, exit_code);
up_write(&mm->mmap_sem); up_write(&mm->mmap_sem);
......
...@@ -159,8 +159,14 @@ struct vm_area_struct { ...@@ -159,8 +159,14 @@ struct vm_area_struct {
#endif #endif
}; };
/*
 * One node in the singly-linked list of threads taking part in a
 * coredump.  Each exiting thread places its node on exit_mm()'s stack
 * and links itself in with xchg() on core_state->dumper.next, so the
 * dumper no longer needs GFP_ATOMIC allocations to build the list.
 */
struct core_thread {
struct task_struct *task;	/* the thread this node represents */
struct core_thread *next;	/* next node in the list; NULL terminates */
};
struct core_state { struct core_state {
atomic_t nr_threads; atomic_t nr_threads;
struct core_thread dumper;
struct completion startup; struct completion startup;
}; };
......
...@@ -664,6 +664,7 @@ void mm_update_next_owner(struct mm_struct *mm) ...@@ -664,6 +664,7 @@ void mm_update_next_owner(struct mm_struct *mm)
static void exit_mm(struct task_struct * tsk) static void exit_mm(struct task_struct * tsk)
{ {
struct mm_struct *mm = tsk->mm; struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
mm_release(tsk, mm); mm_release(tsk, mm);
if (!mm) if (!mm)
...@@ -676,11 +677,19 @@ static void exit_mm(struct task_struct * tsk) ...@@ -676,11 +677,19 @@ static void exit_mm(struct task_struct * tsk)
* group with ->mm != NULL. * group with ->mm != NULL.
*/ */
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
if (mm->core_state) { core_state = mm->core_state;
if (core_state) {
struct core_thread self;
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
if (atomic_dec_and_test(&mm->core_state->nr_threads)) self.task = tsk;
complete(&mm->core_state->startup); self.next = xchg(&core_state->dumper.next, &self);
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.
*/
if (atomic_dec_and_test(&core_state->nr_threads))
complete(&core_state->startup);
wait_for_completion(&mm->core_done); wait_for_completion(&mm->core_done);
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment