Commit 5a5ec729 authored by Ingo Molnar, committed by Jeff Garzik

[PATCH] atomic-thread-signals

Avoid races between signal delivery and thread signal blocking in thread
groups.

The method used is to eliminate the per-thread sigmask_lock and use the
per-group (per-'process') siglock for all signal-related activities.
This immensely simplifies some of the locking interactions within
signal.c, and makes it possible to fix the above category of signal
delivery races.

This became possible due to the earlier thread-signal patch, which made
siglock IRQ-safe (it used to be a process-context-only spinlock).  It is
even a speedup for non-threaded applications: only one lock is used.

I fixed all places within the kernel except the non-x86 arch sections.
Even for those the transition is very straightforward; in almost every
case the following is sufficient in arch/*/kernel/signal.c:

		:1,$s/->sigmask_lock/->sig->siglock/g
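
The resulting pattern for drivers and kernel threads that temporarily
block signals looks like this (a minimal sketch of the post-patch
convention, using only the 2.5-era helpers that already appear in the
diff below; the oldset variable is illustrative):

		sigset_t oldset;

		/*
		 * current->sig->siglock is IRQ-safe and now protects
		 * ->blocked and ->pending, so take it with interrupts
		 * disabled.
		 */
		spin_lock_irq(&current->sig->siglock);
		oldset = current->blocked;
		sigfillset(&current->blocked);	/* block all signals */
		recalc_sigpending();		/* re-evaluate pending state */
		spin_unlock_irq(&current->sig->siglock);

		/* ... work that must not be interrupted by signals ... */

		spin_lock_irq(&current->sig->siglock);
		current->blocked = oldset;	/* restore the original mask */
		recalc_sigpending();
		spin_unlock_irq(&current->sig->siglock);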
parent 5360ccf4
......@@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
regs->eax = -EINTR;
while (1) {
......@@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
regs->eax = -EINTR;
while (1) {
......@@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(regs, &frame->sc, &eax))
goto badframe;
......@@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
goto badframe;
......@@ -532,11 +532,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
}
......
......@@ -440,10 +440,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
return 1; /* we let this handle by the calling routine */
if (current->ptrace & PT_PTRACED) {
unsigned long flags;
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
sigdelset(&current->blocked, SIGTRAP);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
send_sig(SIGTRAP, current, 1);
current->thread.trap_no = trapno;
......
......@@ -598,10 +598,10 @@ static int loop_thread(void *data)
hence, it mustn't be stopped at all because it could
be indirectly used during suspension */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
set_user_nice(current, -20);
......
......@@ -101,11 +101,11 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_
oldfs = get_fs();
set_fs(get_ds());
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
oldset = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
do {
......@@ -137,10 +137,10 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_
buf += result;
} while (size > 0);
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
set_fs(oldfs);
return result;
......
......@@ -528,19 +528,19 @@ static int bt3c_firmware_load(bt3c_info_t *info)
}
/* Block signals, everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
result = waitpid(pid, NULL, __WCLONE);
/* Allow signals again */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (result != pid) {
printk(KERN_WARNING "bt3c_cs: Waiting for pid %d failed (errno=%d).\n", pid, -result);
......
......@@ -633,19 +633,19 @@ static int hci_usb_fw_load(struct usb_device *udev)
}
/* Block signals, everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
result = waitpid(pid, NULL, __WCLONE);
/* Allow signals again */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (result != pid) {
BT_ERR("waitpid failed pid %d errno %d\n", pid, -result);
......
......@@ -404,11 +404,11 @@ int fdc_interrupt_wait(unsigned int time)
/* timeout time will be up to USPT microseconds too long ! */
timeout = (1000 * time + FT_USPT - 1) / FT_USPT;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
old_sigmask = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->state = TASK_INTERRUPTIBLE;
add_wait_queue(&ftape_wait_intr, &wait);
......@@ -416,10 +416,10 @@ int fdc_interrupt_wait(unsigned int time)
timeout = schedule_timeout(timeout);
}
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = old_sigmask;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
remove_wait_queue(&ftape_wait_intr, &wait);
/* the following IS necessary. True: as well
......
......@@ -233,10 +233,10 @@ adb_probe_task(void *x)
{
strcpy(current->comm, "kadbprobe");
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
printk(KERN_INFO "adb: starting probe task...\n");
do_adb_reset_bus();
......
......@@ -2417,9 +2417,9 @@ static struct block_device_operations md_fops =
static inline void flush_curr_signals(void)
{
spin_lock(&current->sigmask_lock);
spin_lock(&current->sig->siglock);
flush_signals(current);
spin_unlock(&current->sigmask_lock);
spin_unlock(&current->sig->siglock);
}
int md_thread(void * arg)
......
......@@ -280,17 +280,17 @@ static void jdelay(unsigned long delay)
{
sigset_t oldblocked = current->blocked;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(delay);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = oldblocked;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
......
......@@ -304,10 +304,10 @@ static int write_queue_task(void *data)
DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid);
daemonize();
strcpy(tsk->comm, "blkmtdd");
spin_lock_irq(&tsk->sigmask_lock);
spin_lock_irq(&tsk->sig->siglock);
sigfillset(&tsk->blocked);
recalc_sigpending();
spin_unlock_irq(&tsk->sigmask_lock);
spin_unlock_irq(&tsk->sig->siglock);
if(alloc_kiovec(1, &iobuf)) {
printk("blkmtd: write_queue_task cant allocate kiobuf\n");
......
......@@ -468,10 +468,10 @@ int mtdblock_thread(void *dummy)
/* we might get involved when memory gets low, so use PF_MEMALLOC */
tsk->flags |= PF_MEMALLOC;
strcpy(tsk->comm, "mtdblockd");
spin_lock_irq(&tsk->sigmask_lock);
spin_lock_irq(&tsk->sig->siglock);
sigfillset(&tsk->blocked);
recalc_sigpending();
spin_unlock_irq(&tsk->sigmask_lock);
spin_unlock_irq(&tsk->sig->siglock);
daemonize();
while (!leaving) {
......
......@@ -1584,10 +1584,10 @@ static int rtl8139_thread (void *data)
unsigned long timeout;
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigemptyset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strncpy (current->comm, dev->name, sizeof(current->comm) - 1);
current->comm[sizeof(current->comm) - 1] = '\0';
......@@ -1599,9 +1599,9 @@ static int rtl8139_thread (void *data)
} while (!signal_pending (current) && (timeout > 0));
if (signal_pending (current)) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
if (tp->time_to_die)
......
......@@ -311,12 +311,12 @@ static int usb_stor_control_thread(void * __us)
daemonize();
/* avoid getting signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
flush_signals(current);
current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/* set our name for identification purposes */
sprintf(current->comm, "usb-storage-%d", us->host_number);
......
......@@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
return (bytes > 0);
......@@ -161,18 +161,18 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
......@@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
return (bytes > 0);
......@@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name,
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
......@@ -649,12 +649,19 @@ static inline int de_thread(struct signal_struct *oldsig)
memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
init_sigpending(&newsig->shared_pending);
remove_thread_group(current, current->sig);
spin_lock_irq(&current->sigmask_lock);
write_lock_irq(&tasklist_lock);
spin_lock(&oldsig->siglock);
spin_lock(&newsig->siglock);
if (current == oldsig->curr_target)
oldsig->curr_target = next_thread(current);
current->sig = newsig;
init_sigpending(&current->pending);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock(&newsig->siglock);
spin_unlock(&oldsig->siglock);
write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsig->count))
kmem_cache_free(sigact_cachep, oldsig);
......@@ -753,12 +760,12 @@ int flush_old_exec(struct linux_binprm * bprm)
mmap_failed:
flush_failed:
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
if (current->sig != oldsig) {
kmem_cache_free(sigact_cachep, current->sig);
current->sig = oldsig;
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
return retval;
}
......
......@@ -205,10 +205,10 @@ int kjournald(void *arg)
lock_kernel();
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
sprintf(current->comm, "kjournald");
......
......@@ -3347,10 +3347,10 @@ jffs_garbage_collect_thread(void *ptr)
current->session = 1;
current->pgrp = 1;
init_completion(&c->gc_thread_comp); /* barrier */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "jffs_gcd");
D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): Starting infinite loop.\n"));
......@@ -3378,9 +3378,9 @@ jffs_garbage_collect_thread(void *ptr)
siginfo_t info;
unsigned long signr;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->blocked, &info);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
switch(signr) {
case SIGSTOP:
......
......@@ -91,10 +91,10 @@ static int jffs2_garbage_collect_thread(void *_c)
set_user_nice(current, 10);
for (;;) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (!thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
......@@ -114,9 +114,9 @@ static int jffs2_garbage_collect_thread(void *_c)
siginfo_t info;
unsigned long signr;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->blocked, &info);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
switch(signr) {
case SIGSTOP:
......@@ -141,10 +141,10 @@ static int jffs2_garbage_collect_thread(void *_c)
}
}
/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv (&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
jffs2_garbage_collect_pass(c);
......
......@@ -2134,10 +2134,10 @@ int jfsIOWait(void *arg)
unlock_kernel();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
complete(&jfsIOwait);
......
......@@ -2779,10 +2779,10 @@ int jfs_lazycommit(void *arg)
jfsCommitTask = current;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
LAZY_LOCK_INIT();
TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0;
......@@ -2979,10 +2979,10 @@ int jfs_sync(void *arg)
unlock_kernel();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
complete(&jfsIOwait);
......
......@@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
/* Keep the old signal mask */
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
oldset = current->blocked;
/* If we're cleaning up locks because the process is exiting,
......@@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
&& (current->flags & PF_EXITING)) {
sigfillset(&current->blocked); /* Mask all signals */
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
call = nlmclnt_alloc_call();
if (!call) {
......@@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
call->a_flags = RPC_TASK_ASYNC;
} else {
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
memset(call, 0, sizeof(*call));
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
......@@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
kfree(call);
out_restore:
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
done:
dprintk("lockd: clnt proc returns %d\n", status);
......@@ -592,11 +592,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
int status;
/* Block all signals while setting up call */
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
oldset = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
req = nlmclnt_alloc_call();
if (!req)
......@@ -611,10 +611,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
if (status < 0)
kfree(req);
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
return status;
}
......
......@@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp)
sprintf(current->comm, "lockd");
/* Process request with signals blocked. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/* kick rpciod */
rpciod_up();
......@@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp)
{
long timeout = MAX_SCHEDULE_TIMEOUT;
if (signalled()) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (nlmsvc_ops) {
nlmsvc_invalidate_all();
grace_period_expire = set_grace_period();
......@@ -297,9 +297,9 @@ lockd_down(void)
"lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
out:
up(&nlmsvc_sema);
}
......
......@@ -743,7 +743,7 @@ static int ncp_do_request(struct ncp_server *server, int size,
sigset_t old_set;
unsigned long mask, flags;
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
old_set = current->blocked;
if (current->flags & PF_EXITING)
mask = 0;
......@@ -762,7 +762,7 @@ static int ncp_do_request(struct ncp_server *server, int size,
}
siginitsetinv(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
fs = get_fs();
set_fs(get_ds());
......@@ -771,10 +771,10 @@ static int ncp_do_request(struct ncp_server *server, int size,
set_fs(fs);
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = old_set;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
DDPRINTK("do_ncp_rpc_call returned %d\n", result);
......
......@@ -186,10 +186,10 @@ nfsd(struct svc_rqst *rqstp)
*/
for (;;) {
/* Block all but the shutdown signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/*
* Find a socket with data available and call its
......@@ -211,10 +211,10 @@ nfsd(struct svc_rqst *rqstp)
*/
rqstp->rq_client = exp_getclient(&rqstp->rq_addr);
/* Process request with signals blocked. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, ALLOWED_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
svc_process(serv, rqstp);
......
......@@ -228,7 +228,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigemptyset(ign);
sigemptyset(catch);
spin_lock_irq(&p->sigmask_lock);
spin_lock_irq(&p->sig->siglock);
if (p->sig) {
k = p->sig->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
......@@ -238,7 +238,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigaddset(catch, i);
}
}
spin_unlock_irq(&p->sigmask_lock);
spin_unlock_irq(&p->sig->siglock);
}
static inline char * task_sig(struct task_struct *p, char *buffer)
......
......@@ -1875,10 +1875,10 @@ static int reiserfs_journal_commit_thread(void *nullp) {
daemonize() ;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
sprintf(current->comm, "kreiserfsd") ;
lock_kernel() ;
......
......@@ -279,10 +279,10 @@ static int smbiod(void *unused)
{
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "smbiod");
......
......@@ -1684,10 +1684,10 @@ pagebuf_iodone_daemon(
daemonize();
/* Avoid signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/* Migrate to the right CPU */
set_cpus_allowed(current, 1UL << cpu);
......@@ -1752,10 +1752,10 @@ pagebuf_daemon(
daemonize();
/* Avoid signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "pagebufd");
current->flags |= PF_MEMALLOC;
......
......@@ -90,7 +90,6 @@
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.sigmask_lock = SPIN_LOCK_UNLOCKED, \
.sig = &init_signals, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \
......
......@@ -380,7 +380,6 @@ struct task_struct {
/* namespace */
struct namespace *namespace;
/* signal handlers */
spinlock_t sigmask_lock; /* Protects signal and blocked */
struct signal_struct *sig;
sigset_t blocked, real_blocked, shared_unblocked;
......@@ -657,7 +656,6 @@ extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
extern void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig);
extern void reparent_to_init(void);
extern void daemonize(void);
......@@ -955,7 +953,7 @@ static inline void cond_resched_lock(spinlock_t * lock)
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
Athread cathreaders should have t->sigmask_lock. */
callers must hold sig->siglock. */
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);
......
......@@ -77,10 +77,10 @@ static int context_thread(void *startup)
keventd_running = 1;
keventd_task = curtask;
spin_lock_irq(&curtask->sigmask_lock);
spin_lock_irq(&curtask->sig->siglock);
siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
recalc_sigpending();
spin_unlock_irq(&curtask->sigmask_lock);
spin_unlock_irq(&curtask->sig->siglock);
complete((struct completion *)startup);
......@@ -106,10 +106,10 @@ static int context_thread(void *startup)
if (signal_pending(curtask)) {
while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
;
spin_lock_irq(&curtask->sigmask_lock);
spin_lock_irq(&curtask->sig->siglock);
flush_signals(curtask);
recalc_sigpending();
spin_unlock_irq(&curtask->sigmask_lock);
spin_unlock_irq(&curtask->sig->siglock);
}
}
}
......
......@@ -756,7 +756,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* ?? should we just memset this ?? */
for(i = 0; i < NR_CPUS; i++)
p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
spin_lock_init(&p->sigmask_lock);
}
#endif
p->array = NULL;
......
......@@ -110,12 +110,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
as the super user right after the execve fails if you time
the signal just right.
*/
spin_lock_irq(&curtask->sigmask_lock);
spin_lock_irq(&curtask->sig->siglock);
sigemptyset(&curtask->blocked);
flush_signals(curtask);
flush_signal_handlers(curtask);
recalc_sigpending();
spin_unlock_irq(&curtask->sigmask_lock);
spin_unlock_irq(&curtask->sig->siglock);
for (i = 0; i < curtask->files->max_fds; i++ ) {
if (curtask->files->fd[i]) close(i);
......@@ -238,20 +238,20 @@ int request_module(const char * module_name)
}
/* Block everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
waitpid_result = waitpid(pid, NULL, __WCLONE);
atomic_dec(&kmod_concurrent);
/* Allow signals again.. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (waitpid_result != pid) {
printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
......
......@@ -219,9 +219,9 @@ int freeze_processes(void)
/* FIXME: smp problem here: we may not access other process' flags
without locking */
p->flags |= PF_FREEZE;
spin_lock_irqsave(&p->sigmask_lock, flags);
spin_lock_irqsave(&p->sig->siglock, flags);
signal_wake_up(p);
spin_unlock_irqrestore(&p->sigmask_lock, flags);
spin_unlock_irqrestore(&p->sig->siglock, flags);
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
......
......@@ -91,10 +91,10 @@ static int __pdflush(struct pdflush_work *my_work)
strcpy(current->comm, "pdflush");
/* interruptible sleep, so block all signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, 0);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->flags |= PF_FLUSHER;
my_work->fn = NULL;
......
......@@ -226,21 +226,21 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGQUIT);
}
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
*oldset = current->blocked;
siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
unsigned long irqflags;
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
current->blocked = *oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
}
/*
......
......@@ -992,10 +992,10 @@ rpciod(void *ptr)
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "rpciod");
......@@ -1050,9 +1050,9 @@ rpciod_killall(void)
}
}
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
/*
......@@ -1128,9 +1128,9 @@ rpciod_down(void)
}
interruptible_sleep_on(&rpciod_killer);
}
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
out:
up(&rpciod_sema);
MOD_DEC_USE_COUNT;
......
......@@ -213,9 +213,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
}
if (!port) {
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
return error;
......