Commit 8a061159 authored by Ingo Molnar, committed by Greg Kroah-Hartman

[PATCH] tcore-fixes-2.5.50-E6

This fixes threaded coredumps and streamlines the code.  The old code
caused crashes and hung coredumps.  The new code has been tested for
some time already and appears to be robust.  Changes:

 - the code now uses completions instead of a semaphore and a waitqueue,
   attached to mm_struct:

        /* coredumping support */
        int core_waiters;
        struct completion *core_startup_done, core_done;

 - extended the completion concept with a 'complete all' call - all pending
   threads are woken up in that case.

 - core_waiters is a plain integer now - it's always accessed from under
   the mmap_sem. It's also used as the fastpath-check in the sys_exit()
   path, instead of ->dumpable (which was incorrect). (The sketch after
   this list walks through the resulting handshake.)

 - got rid of the ->core_waiter task flag - it's not needed anymore.
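
To make the handshake concrete, below is a rough userspace model of the
protocol the bullets above describe - it is an illustration, not kernel code.
pthread mutexes and condition variables stand in for the mmap_sem and for the
two completions, and NTHREADS, exiting_thread() and the file name in the build
line are made up for the example:

	/* Rough userspace model of the coredump handshake -- NOT kernel code.
	 * pthread mutexes/condvars stand in for mmap_sem and the completions;
	 * NTHREADS, exiting_thread() and the file name are made up here.
	 * Build with: cc -pthread -o tcore_model tcore_model.c
	 */
	#include <limits.h>
	#include <pthread.h>
	#include <stdio.h>

	#define NTHREADS 4			/* "threads" sharing the dumping mm */

	struct completion {			/* condvar-based stand-in for a completion */
		pthread_mutex_t lock;
		pthread_cond_t wait;
		unsigned int done;
	};
	#define COMPLETION_INIT { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

	static void wait_for_completion(struct completion *x)
	{
		pthread_mutex_lock(&x->lock);
		while (!x->done)
			pthread_cond_wait(&x->wait, &x->lock);
		x->done--;			/* consume one completion */
		pthread_mutex_unlock(&x->lock);
	}

	static void complete(struct completion *x)	/* wake one waiter */
	{
		pthread_mutex_lock(&x->lock);
		x->done++;
		pthread_cond_signal(&x->wait);
		pthread_mutex_unlock(&x->lock);
	}

	static void complete_all(struct completion *x)	/* wake every waiter */
	{
		pthread_mutex_lock(&x->lock);
		x->done += UINT_MAX / 2;	/* same trick the kernel version uses */
		pthread_cond_broadcast(&x->wait);
		pthread_mutex_unlock(&x->lock);
	}

	static struct {				/* the fields this patch keeps in mm_struct */
		pthread_mutex_t mmap_sem;	/* stands in for the mmap_sem rwsem */
		int core_waiters;
		struct completion core_startup_done, core_done;
	} mm = { PTHREAD_MUTEX_INITIALIZER, 0, COMPLETION_INIT, COMPLETION_INIT };

	static void *exiting_thread(void *arg)	/* models the exit-path slowpath */
	{
		pthread_mutex_lock(&mm.mmap_sem);
		if (mm.core_waiters) {		/* the fastpath check in sys_exit() */
			if (!--mm.core_waiters)	/* last thread in wakes the dumper */
				complete(&mm.core_startup_done);
			pthread_mutex_unlock(&mm.mmap_sem);
			wait_for_completion(&mm.core_done);
			printf("thread %ld: released after the dump\n", (long)arg);
		} else
			pthread_mutex_unlock(&mm.mmap_sem);
		return NULL;
	}

	int main(void)				/* models coredump_wait()/do_coredump() */
	{
		pthread_t tids[NTHREADS];
		long i;

		/* like zap_threads(): count every other thread before it can exit */
		pthread_mutex_lock(&mm.mmap_sem);
		mm.core_waiters = NTHREADS;
		pthread_mutex_unlock(&mm.mmap_sem);

		for (i = 0; i < NTHREADS; i++)
			pthread_create(&tids[i], NULL, exiting_thread, (void *)i);

		wait_for_completion(&mm.core_startup_done);	/* everybody is parked */
		printf("dumper: all threads quiescent, writing the core file...\n");
		complete_all(&mm.core_done);	/* release all parked threads at once */

		for (i = 0; i < NTHREADS; i++)
			pthread_join(tids[i], NULL);
		return 0;
	}

The value of the 'complete all' extension shows up at the end: complete()
maps onto pthread_cond_signal() and wakes a single waiter (the dumper waiting
in coredump_wait()), while complete_all() maps onto pthread_cond_broadcast()
and releases every thread parked on core_done in one go, which is what the
coredump path needs once the dump has been written.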
parent fe43697e
@@ -1238,49 +1238,58 @@ static void zap_threads (struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 	/* give other threads a chance to run: */
 	yield();
 	read_lock(&tasklist_lock);
 	do_each_thread(g,p)
-		if (mm == p->mm && !p->core_waiter)
+		if (mm == p->mm && p != current) {
 			force_sig_specific(SIGKILL, p);
+			mm->core_waiters++;
+		}
 	while_each_thread(g,p);
 	read_unlock(&tasklist_lock);
 }
 static void coredump_wait(struct mm_struct *mm)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DECLARE_COMPLETION(startup_done);
+	mm->core_waiters++; /* let other threads block */
+	mm->core_startup_done = &startup_done;
+	/* give other threads a chance to run: */
+	yield();
-	atomic_inc(&mm->core_waiters);
-	add_wait_queue(&mm->core_wait, &wait);
 	zap_threads(mm);
-	current->state = TASK_UNINTERRUPTIBLE;
-	if (atomic_read(&mm->core_waiters) != atomic_read(&mm->mm_users))
-		schedule();
-	else
-		current->state = TASK_RUNNING;
+	if (--mm->core_waiters) {
+		up_write(&mm->mmap_sem);
+		wait_for_completion(&startup_done);
+	} else
+		up_write(&mm->mmap_sem);
+	BUG_ON(mm->core_waiters);
 }
 int do_coredump(long signr, struct pt_regs * regs)
 {
-	struct linux_binfmt * binfmt;
 	char corename[CORENAME_MAX_SIZE + 1];
-	struct file * file;
+	struct mm_struct *mm = current->mm;
+	struct linux_binfmt * binfmt;
 	struct inode * inode;
+	struct file * file;
 	int retval = 0;
 	lock_kernel();
 	binfmt = current->binfmt;
 	if (!binfmt || !binfmt->core_dump)
 		goto fail;
-	if (!current->mm->dumpable)
+	down_write(&mm->mmap_sem);
+	if (!mm->dumpable) {
+		up_write(&mm->mmap_sem);
 		goto fail;
-	current->mm->dumpable = 0;
-	if (down_trylock(&current->mm->core_sem))
-		BUG();
-	coredump_wait(current->mm);
+	}
+	mm->dumpable = 0;
+	init_completion(&mm->core_done);
+	coredump_wait(mm);
 	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
 		goto fail_unlock;
@@ -1308,7 +1317,7 @@ int do_coredump(long signr, struct pt_regs * regs)
 close_fail:
 	filp_close(file, NULL);
 fail_unlock:
-	up(&current->mm->core_sem);
+	complete_all(&mm->core_done);
 fail:
 	unlock_kernel();
 	return retval;
@@ -29,6 +29,7 @@ static inline void init_completion(struct completion *x)
 extern void FASTCALL(wait_for_completion(struct completion *));
 extern void FASTCALL(complete(struct completion *));
+extern void FASTCALL(complete_all(struct completion *));
 #define INIT_COMPLETION(x) ((x).done = 0)
@@ -203,9 +203,8 @@ struct mm_struct {
 	mm_context_t context;
 	/* coredumping support */
-	struct semaphore core_sem;
-	atomic_t core_waiters;
-	wait_queue_head_t core_wait;
+	int core_waiters;
+	struct completion *core_startup_done, core_done;
 	/* aio bits */
 	rwlock_t ioctx_list_lock;
@@ -397,8 +396,6 @@ struct task_struct {
 	void *journal_info;
 	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
-	/* threaded coredumping support */
-	int core_waiter;
 	unsigned long ptrace_message;
 };
@@ -425,14 +425,13 @@ static inline void __exit_mm(struct task_struct * tsk)
 	/*
 	 * Serialize with any possible pending coredump:
 	 */
-	if (!mm->dumpable) {
-		current->core_waiter = 1;
-		atomic_inc(&mm->core_waiters);
-		if (atomic_read(&mm->core_waiters) == atomic_read(&mm->mm_users))
-			wake_up(&mm->core_wait);
-		down(&mm->core_sem);
-		up(&mm->core_sem);
-		atomic_dec(&mm->core_waiters);
+	if (mm->core_waiters) {
+		down_write(&mm->mmap_sem);
+		if (!--mm->core_waiters)
+			complete(mm->core_startup_done);
+		up_write(&mm->mmap_sem);
+		wait_for_completion(&mm->core_done);
 	}
 	atomic_inc(&mm->mm_count);
 	if (mm != tsk->active_mm) BUG();
@@ -328,9 +328,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
-	init_MUTEX(&mm->core_sem);
-	init_waitqueue_head(&mm->core_wait);
-	atomic_set(&mm->core_waiters, 0);
+	mm->core_waiters = 0;
 	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
 	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
 	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
@@ -800,7 +798,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->start_time = jiffies;
 	p->security = NULL;
-	p->core_waiter = 0;
 	retval = -ENOMEM;
 	if (security_task_alloc(p))
 		goto bad_fork_cleanup;
@@ -1192,6 +1192,16 @@ void complete(struct completion *x)
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
+void complete_all(struct completion *x)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&x->wait.lock, flags);
+	x->done += UINT_MAX/2;
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	spin_unlock_irqrestore(&x->wait.lock, flags);
+}
 void wait_for_completion(struct completion *x)
 {
 	might_sleep();