Commit 8a061159 authored by Ingo Molnar, committed by Greg Kroah-Hartman

[PATCH] tcore-fixes-2.5.50-E6

This fixes threaded coredumps and streamlines the code.  The old code
caused crashes and hung coredumps.  The new code has been tested for
some time already and appears to be robust.  Changes:

 - the code now uses completions instead of a semaphore and a waitqueue,
   attached to mm_struct:

        /* coredumping support */
        int core_waiters;
        struct completion *core_startup_done, core_done;

 - extended the completion concept with a 'complete all' call - all pending
   threads are woken up in that case.

 - core_waiters is a plain integer now - it's always accessed from under
   the mmap_sem. It's also used as the fastpath-check in the sys_exit()
   path, instead of ->dumpable (which was incorrect).

 - got rid of the ->core_waiter task flag - it's not needed anymore.
parent fe43697e
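Before the hunks, here is the handshake the patch sets up between the dumping thread and all other threads sharing the mm, condensed into one place. This is a simplified, non-compilable reading aid pieced together from the coredump and exit-path hunks below (the dumpable/binfmt error paths, the yield() and the actual core writing are left out); it is not literal code from the patch.

        /*
         * Dumping thread, do_coredump() -> coredump_wait():
         */
        down_write(&mm->mmap_sem);
        mm->dumpable = 0;
        init_completion(&mm->core_done);

        mm->core_waiters++;                     /* nonzero: exiting threads will block */
        mm->core_startup_done = &startup_done;  /* on-stack DECLARE_COMPLETION() */
        zap_threads(mm);                        /* SIGKILL every other thread on this mm,
                                                   bumping core_waiters once per thread */
        if (--mm->core_waiters) {
                up_write(&mm->mmap_sem);
                wait_for_completion(&startup_done);  /* last exiting thread completes it */
        } else
                up_write(&mm->mmap_sem);

        /* ... write out the core file ... */
        complete_all(&mm->core_done);           /* release every parked thread */

        /*
         * Every other thread, in __exit_mm():
         */
        if (mm->core_waiters) {                 /* fastpath check, only set during a dump */
                down_write(&mm->mmap_sem);
                if (!--mm->core_waiters)
                        complete(mm->core_startup_done);
                up_write(&mm->mmap_sem);

                wait_for_completion(&mm->core_done);  /* park until the dump is written */
        }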
@@ -1238,49 +1238,58 @@ static void zap_threads (struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 
-	/* give other threads a chance to run: */
-	yield();
-
 	read_lock(&tasklist_lock);
 	do_each_thread(g,p)
-		if (mm == p->mm && !p->core_waiter)
+		if (mm == p->mm && p != current) {
 			force_sig_specific(SIGKILL, p);
+			mm->core_waiters++;
+		}
 	while_each_thread(g,p);
 	read_unlock(&tasklist_lock);
 }
 
 static void coredump_wait(struct mm_struct *mm)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DECLARE_COMPLETION(startup_done);
 
-	atomic_inc(&mm->core_waiters);
-	add_wait_queue(&mm->core_wait, &wait);
+	mm->core_waiters++; /* let other threads block */
+	mm->core_startup_done = &startup_done;
+
+	/* give other threads a chance to run: */
+	yield();
+
 	zap_threads(mm);
-	current->state = TASK_UNINTERRUPTIBLE;
-	if (atomic_read(&mm->core_waiters) != atomic_read(&mm->mm_users))
-		schedule();
-	else
-		current->state = TASK_RUNNING;
+	if (--mm->core_waiters) {
+		up_write(&mm->mmap_sem);
+		wait_for_completion(&startup_done);
+	} else
+		up_write(&mm->mmap_sem);
+	BUG_ON(mm->core_waiters);
 }
 
 int do_coredump(long signr, struct pt_regs * regs)
 {
-	struct linux_binfmt * binfmt;
 	char corename[CORENAME_MAX_SIZE + 1];
-	struct file * file;
+	struct mm_struct *mm = current->mm;
+	struct linux_binfmt * binfmt;
 	struct inode * inode;
+	struct file * file;
 	int retval = 0;
 
 	lock_kernel();
 	binfmt = current->binfmt;
 	if (!binfmt || !binfmt->core_dump)
 		goto fail;
-	if (!current->mm->dumpable)
+	down_write(&mm->mmap_sem);
+	if (!mm->dumpable) {
+		up_write(&mm->mmap_sem);
 		goto fail;
-	current->mm->dumpable = 0;
-	if (down_trylock(&current->mm->core_sem))
-		BUG();
-	coredump_wait(current->mm);
+	}
+	mm->dumpable = 0;
+	init_completion(&mm->core_done);
+	coredump_wait(mm);
+
 	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
 		goto fail_unlock;
@@ -1308,7 +1317,7 @@ int do_coredump(long signr, struct pt_regs * regs)
 close_fail:
 	filp_close(file, NULL);
 fail_unlock:
-	up(&current->mm->core_sem);
+	complete_all(&mm->core_done);
 fail:
 	unlock_kernel();
 	return retval;
......
@@ -29,6 +29,7 @@ static inline void init_completion(struct completion *x)
 extern void FASTCALL(wait_for_completion(struct completion *));
 extern void FASTCALL(complete(struct completion *));
+extern void FASTCALL(complete_all(struct completion *));
 
 #define INIT_COMPLETION(x)	((x).done = 0)
......
@@ -203,9 +203,8 @@ struct mm_struct {
 	mm_context_t context;
 
 	/* coredumping support */
-	struct semaphore core_sem;
-	atomic_t core_waiters;
-	wait_queue_head_t core_wait;
+	int core_waiters;
+	struct completion *core_startup_done, core_done;
 
 	/* aio bits */
 	rwlock_t ioctx_list_lock;
@@ -397,8 +396,6 @@ struct task_struct {
 	void *journal_info;
 	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
-	/* threaded coredumping support */
-	int core_waiter;
 	unsigned long ptrace_message;
 };
......
@@ -425,14 +425,13 @@ static inline void __exit_mm(struct task_struct * tsk)
 	/*
 	 * Serialize with any possible pending coredump:
 	 */
-	if (!mm->dumpable) {
-		current->core_waiter = 1;
-		atomic_inc(&mm->core_waiters);
-		if (atomic_read(&mm->core_waiters) == atomic_read(&mm->mm_users))
-			wake_up(&mm->core_wait);
-		down(&mm->core_sem);
-		up(&mm->core_sem);
-		atomic_dec(&mm->core_waiters);
+	if (mm->core_waiters) {
+		down_write(&mm->mmap_sem);
+		if (!--mm->core_waiters)
+			complete(mm->core_startup_done);
+		up_write(&mm->mmap_sem);
+
+		wait_for_completion(&mm->core_done);
 	}
 	atomic_inc(&mm->mm_count);
 	if (mm != tsk->active_mm) BUG();
......
@@ -328,9 +328,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
-	init_MUTEX(&mm->core_sem);
-	init_waitqueue_head(&mm->core_wait);
-	atomic_set(&mm->core_waiters, 0);
+	mm->core_waiters = 0;
 	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
 	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
 	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
@@ -800,7 +798,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->start_time = jiffies;
 	p->security = NULL;
-	p->core_waiter = 0;
 
 	retval = -ENOMEM;
 	if (security_task_alloc(p))
 		goto bad_fork_cleanup;
......
@@ -1192,6 +1192,16 @@ void complete(struct completion *x)
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
+void complete_all(struct completion *x)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done += UINT_MAX/2;
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+
 void wait_for_completion(struct completion *x)
 {
 	might_sleep();
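One design detail of the new complete_all() above is worth a note (my reading, not part of the changelog): instead of bumping ->done once per waiter, it adds UINT_MAX/2 and then wakes the whole queue - passing 0 as the nr_exclusive argument of __wake_up_common() means there is no limit on how many exclusive waiters get woken. Since wait_for_completion() consumes one count per caller, the oversized count lets both the threads already sleeping on the completion and any thread that only calls wait_for_completion() afterwards fall straight through, which is what the exit path needs when an unknown number of threads are parked on mm->core_done.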
......