Commit 5a5ec729 authored by Ingo Molnar's avatar Ingo Molnar Committed by Jeff Garzik

[PATCH] atomic-thread-signals

Avoid racing on signal delivery with thread signal blocking in thread
groups.

The method to do this is to eliminate the per-thread sigmask_lock, and
use the per-group (per 'process') siglock for all signal related
activities.  This immensely simplifies some of the locking interactions
within signal.c, and enables fixing the above category of signal
delivery races.

This became possible due to the earlier thread-signal patch, which made
the siglock irq-safe.  (It used to be a process-context-only spinlock.)
This change is even a speedup for non-threaded applications: only one
lock is used.

I fixed all places within the kernel except the non-x86 arch sections.
Even for them the transition is very straightforward, in almost every
case the following is sufficient in arch/*/kernel/signal.c:

		:1,$s/->sigmask_lock/->sig->siglock/g
parent 5360ccf4
......@@ -37,11 +37,11 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
regs->eax = -EINTR;
while (1) {
......@@ -66,11 +66,11 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
regs->eax = -EINTR;
while (1) {
......@@ -224,10 +224,10 @@ asmlinkage int sys_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(regs, &frame->sc, &eax))
goto badframe;
......@@ -252,10 +252,10 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
goto badframe;
......@@ -532,11 +532,11 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
}
......
......@@ -440,10 +440,10 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
return 1; /* we let this handle by the calling routine */
if (current->ptrace & PT_PTRACED) {
unsigned long flags;
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
sigdelset(&current->blocked, SIGTRAP);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
send_sig(SIGTRAP, current, 1);
current->thread.trap_no = trapno;
......
......@@ -598,10 +598,10 @@ static int loop_thread(void *data)
hence, it mustn't be stopped at all because it could
be indirectly used during suspension */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
set_user_nice(current, -20);
......
......@@ -101,11 +101,11 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_
oldfs = get_fs();
set_fs(get_ds());
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
oldset = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
do {
......@@ -137,10 +137,10 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_
buf += result;
} while (size > 0);
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
set_fs(oldfs);
return result;
......
......@@ -528,19 +528,19 @@ static int bt3c_firmware_load(bt3c_info_t *info)
}
/* Block signals, everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
result = waitpid(pid, NULL, __WCLONE);
/* Allow signals again */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (result != pid) {
printk(KERN_WARNING "bt3c_cs: Waiting for pid %d failed (errno=%d).\n", pid, -result);
......
......@@ -633,19 +633,19 @@ static int hci_usb_fw_load(struct usb_device *udev)
}
/* Block signals, everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
result = waitpid(pid, NULL, __WCLONE);
/* Allow signals again */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (result != pid) {
BT_ERR("waitpid failed pid %d errno %d\n", pid, -result);
......
......@@ -404,11 +404,11 @@ int fdc_interrupt_wait(unsigned int time)
/* timeout time will be up to USPT microseconds too long ! */
timeout = (1000 * time + FT_USPT - 1) / FT_USPT;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
old_sigmask = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->state = TASK_INTERRUPTIBLE;
add_wait_queue(&ftape_wait_intr, &wait);
......@@ -416,10 +416,10 @@ int fdc_interrupt_wait(unsigned int time)
timeout = schedule_timeout(timeout);
}
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = old_sigmask;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
remove_wait_queue(&ftape_wait_intr, &wait);
/* the following IS necessary. True: as well
......
......@@ -233,10 +233,10 @@ adb_probe_task(void *x)
{
strcpy(current->comm, "kadbprobe");
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
printk(KERN_INFO "adb: starting probe task...\n");
do_adb_reset_bus();
......
......@@ -2417,9 +2417,9 @@ static struct block_device_operations md_fops =
static inline void flush_curr_signals(void)
{
spin_lock(&current->sigmask_lock);
spin_lock(&current->sig->siglock);
flush_signals(current);
spin_unlock(&current->sigmask_lock);
spin_unlock(&current->sig->siglock);
}
int md_thread(void * arg)
......
......@@ -280,17 +280,17 @@ static void jdelay(unsigned long delay)
{
sigset_t oldblocked = current->blocked;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(delay);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = oldblocked;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
......
......@@ -304,10 +304,10 @@ static int write_queue_task(void *data)
DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid);
daemonize();
strcpy(tsk->comm, "blkmtdd");
spin_lock_irq(&tsk->sigmask_lock);
spin_lock_irq(&tsk->sig->siglock);
sigfillset(&tsk->blocked);
recalc_sigpending();
spin_unlock_irq(&tsk->sigmask_lock);
spin_unlock_irq(&tsk->sig->siglock);
if(alloc_kiovec(1, &iobuf)) {
printk("blkmtd: write_queue_task cant allocate kiobuf\n");
......
......@@ -468,10 +468,10 @@ int mtdblock_thread(void *dummy)
/* we might get involved when memory gets low, so use PF_MEMALLOC */
tsk->flags |= PF_MEMALLOC;
strcpy(tsk->comm, "mtdblockd");
spin_lock_irq(&tsk->sigmask_lock);
spin_lock_irq(&tsk->sig->siglock);
sigfillset(&tsk->blocked);
recalc_sigpending();
spin_unlock_irq(&tsk->sigmask_lock);
spin_unlock_irq(&tsk->sig->siglock);
daemonize();
while (!leaving) {
......
......@@ -1584,10 +1584,10 @@ static int rtl8139_thread (void *data)
unsigned long timeout;
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigemptyset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strncpy (current->comm, dev->name, sizeof(current->comm) - 1);
current->comm[sizeof(current->comm) - 1] = '\0';
......@@ -1599,9 +1599,9 @@ static int rtl8139_thread (void *data)
} while (!signal_pending (current) && (timeout > 0));
if (signal_pending (current)) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
if (tp->time_to_die)
......
......@@ -311,12 +311,12 @@ static int usb_stor_control_thread(void * __us)
daemonize();
/* avoid getting signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
flush_signals(current);
current->flags |= PF_IOTHREAD;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/* set our name for identification purposes */
sprintf(current->comm, "usb-storage-%d", us->host_number);
......
......@@ -70,10 +70,10 @@ static int autofs_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
return (bytes > 0);
......@@ -161,18 +161,18 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
......@@ -74,10 +74,10 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
/* Keep the currently executing process from receiving a
SIGPIPE unless it was already supposed to get one */
if (wr == -EPIPE && !sigpipe) {
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
sigdelset(&current->pending.signal, SIGPIPE);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
return (bytes > 0);
......@@ -198,18 +198,18 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct qstr *name,
sigset_t oldset;
unsigned long irqflags;
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
oldset = current->blocked;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS & ~oldset.sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
interruptible_sleep_on(&wq->queue);
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
} else {
DPRINTK(("autofs_wait: skipped sleeping\n"));
}
......
......@@ -649,12 +649,19 @@ static inline int de_thread(struct signal_struct *oldsig)
memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
init_sigpending(&newsig->shared_pending);
remove_thread_group(current, current->sig);
spin_lock_irq(&current->sigmask_lock);
write_lock_irq(&tasklist_lock);
spin_lock(&oldsig->siglock);
spin_lock(&newsig->siglock);
if (current == oldsig->curr_target)
oldsig->curr_target = next_thread(current);
current->sig = newsig;
init_sigpending(&current->pending);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock(&newsig->siglock);
spin_unlock(&oldsig->siglock);
write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsig->count))
kmem_cache_free(sigact_cachep, oldsig);
......@@ -753,12 +760,12 @@ int flush_old_exec(struct linux_binprm * bprm)
mmap_failed:
flush_failed:
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
if (current->sig != oldsig) {
kmem_cache_free(sigact_cachep, current->sig);
current->sig = oldsig;
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
return retval;
}
......
......@@ -205,10 +205,10 @@ int kjournald(void *arg)
lock_kernel();
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
sprintf(current->comm, "kjournald");
......
......@@ -3347,10 +3347,10 @@ jffs_garbage_collect_thread(void *ptr)
current->session = 1;
current->pgrp = 1;
init_completion(&c->gc_thread_comp); /* barrier */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "jffs_gcd");
D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): Starting infinite loop.\n"));
......@@ -3378,9 +3378,9 @@ jffs_garbage_collect_thread(void *ptr)
siginfo_t info;
unsigned long signr;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->blocked, &info);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
switch(signr) {
case SIGSTOP:
......
......@@ -91,10 +91,10 @@ static int jffs2_garbage_collect_thread(void *_c)
set_user_nice(current, 10);
for (;;) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (!thread_should_wake(c)) {
set_current_state (TASK_INTERRUPTIBLE);
......@@ -114,9 +114,9 @@ static int jffs2_garbage_collect_thread(void *_c)
siginfo_t info;
unsigned long signr;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->blocked, &info);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
switch(signr) {
case SIGSTOP:
......@@ -141,10 +141,10 @@ static int jffs2_garbage_collect_thread(void *_c)
}
}
/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv (&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
jffs2_garbage_collect_pass(c);
......
......@@ -2134,10 +2134,10 @@ int jfsIOWait(void *arg)
unlock_kernel();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
complete(&jfsIOwait);
......
......@@ -2779,10 +2779,10 @@ int jfs_lazycommit(void *arg)
jfsCommitTask = current;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
LAZY_LOCK_INIT();
TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0;
......@@ -2979,10 +2979,10 @@ int jfs_sync(void *arg)
unlock_kernel();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
complete(&jfsIOwait);
......
......@@ -139,7 +139,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
/* Keep the old signal mask */
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
oldset = current->blocked;
/* If we're cleaning up locks because the process is exiting,
......@@ -149,7 +149,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
&& (current->flags & PF_EXITING)) {
sigfillset(&current->blocked); /* Mask all signals */
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
call = nlmclnt_alloc_call();
if (!call) {
......@@ -158,7 +158,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
call->a_flags = RPC_TASK_ASYNC;
} else {
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
memset(call, 0, sizeof(*call));
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
......@@ -183,10 +183,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
kfree(call);
out_restore:
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
done:
dprintk("lockd: clnt proc returns %d\n", status);
......@@ -592,11 +592,11 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
int status;
/* Block all signals while setting up call */
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
oldset = current->blocked;
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
req = nlmclnt_alloc_call();
if (!req)
......@@ -611,10 +611,10 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
if (status < 0)
kfree(req);
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
return status;
}
......
......@@ -101,10 +101,10 @@ lockd(struct svc_rqst *rqstp)
sprintf(current->comm, "lockd");
/* Process request with signals blocked. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/* kick rpciod */
rpciod_up();
......@@ -126,9 +126,9 @@ lockd(struct svc_rqst *rqstp)
{
long timeout = MAX_SCHEDULE_TIMEOUT;
if (signalled()) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (nlmsvc_ops) {
nlmsvc_invalidate_all();
grace_period_expire = set_grace_period();
......@@ -297,9 +297,9 @@ lockd_down(void)
"lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
out:
up(&nlmsvc_sema);
}
......
......@@ -743,7 +743,7 @@ static int ncp_do_request(struct ncp_server *server, int size,
sigset_t old_set;
unsigned long mask, flags;
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
old_set = current->blocked;
if (current->flags & PF_EXITING)
mask = 0;
......@@ -762,7 +762,7 @@ static int ncp_do_request(struct ncp_server *server, int size,
}
siginitsetinv(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
fs = get_fs();
set_fs(get_ds());
......@@ -771,10 +771,10 @@ static int ncp_do_request(struct ncp_server *server, int size,
set_fs(fs);
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->blocked = old_set;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
DDPRINTK("do_ncp_rpc_call returned %d\n", result);
......
......@@ -186,10 +186,10 @@ nfsd(struct svc_rqst *rqstp)
*/
for (;;) {
/* Block all but the shutdown signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/*
* Find a socket with data available and call its
......@@ -211,10 +211,10 @@ nfsd(struct svc_rqst *rqstp)
*/
rqstp->rq_client = exp_getclient(&rqstp->rq_addr);
/* Process request with signals blocked. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, ALLOWED_SIGS);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
svc_process(serv, rqstp);
......
......@@ -228,7 +228,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigemptyset(ign);
sigemptyset(catch);
spin_lock_irq(&p->sigmask_lock);
spin_lock_irq(&p->sig->siglock);
if (p->sig) {
k = p->sig->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
......@@ -238,7 +238,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigaddset(catch, i);
}
}
spin_unlock_irq(&p->sigmask_lock);
spin_unlock_irq(&p->sig->siglock);
}
static inline char * task_sig(struct task_struct *p, char *buffer)
......
......@@ -1875,10 +1875,10 @@ static int reiserfs_journal_commit_thread(void *nullp) {
daemonize() ;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
sprintf(current->comm, "kreiserfsd") ;
lock_kernel() ;
......
......@@ -279,10 +279,10 @@ static int smbiod(void *unused)
{
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "smbiod");
......
......@@ -1684,10 +1684,10 @@ pagebuf_iodone_daemon(
daemonize();
/* Avoid signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/* Migrate to the right CPU */
set_cpus_allowed(current, 1UL << cpu);
......@@ -1752,10 +1752,10 @@ pagebuf_daemon(
daemonize();
/* Avoid signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigfillset(&current->blocked);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "pagebufd");
current->flags |= PF_MEMALLOC;
......
......@@ -90,7 +90,6 @@
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.sigmask_lock = SPIN_LOCK_UNLOCKED, \
.sig = &init_signals, \
.pending = { NULL, &tsk.pending.head, {{0}}}, \
.blocked = {{0}}, \
......
......@@ -380,7 +380,6 @@ struct task_struct {
/* namespace */
struct namespace *namespace;
/* signal handlers */
spinlock_t sigmask_lock; /* Protects signal and blocked */
struct signal_struct *sig;
sigset_t blocked, real_blocked, shared_unblocked;
......@@ -657,7 +656,6 @@ extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);
extern void __exit_sighand(struct task_struct *);
extern void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig);
extern void reparent_to_init(void);
extern void daemonize(void);
......@@ -955,7 +953,7 @@ static inline void cond_resched_lock(spinlock_t * lock)
/* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes.
Athread cathreaders should have t->sigmask_lock. */
callers must hold sig->siglock. */
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);
......
......@@ -77,10 +77,10 @@ static int context_thread(void *startup)
keventd_running = 1;
keventd_task = curtask;
spin_lock_irq(&curtask->sigmask_lock);
spin_lock_irq(&curtask->sig->siglock);
siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
recalc_sigpending();
spin_unlock_irq(&curtask->sigmask_lock);
spin_unlock_irq(&curtask->sig->siglock);
complete((struct completion *)startup);
......@@ -106,10 +106,10 @@ static int context_thread(void *startup)
if (signal_pending(curtask)) {
while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
;
spin_lock_irq(&curtask->sigmask_lock);
spin_lock_irq(&curtask->sig->siglock);
flush_signals(curtask);
recalc_sigpending();
spin_unlock_irq(&curtask->sigmask_lock);
spin_unlock_irq(&curtask->sig->siglock);
}
}
}
......
......@@ -756,7 +756,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* ?? should we just memset this ?? */
for(i = 0; i < NR_CPUS; i++)
p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
spin_lock_init(&p->sigmask_lock);
}
#endif
p->array = NULL;
......
......@@ -110,12 +110,12 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
as the super user right after the execve fails if you time
the signal just right.
*/
spin_lock_irq(&curtask->sigmask_lock);
spin_lock_irq(&curtask->sig->siglock);
sigemptyset(&curtask->blocked);
flush_signals(curtask);
flush_signal_handlers(curtask);
recalc_sigpending();
spin_unlock_irq(&curtask->sigmask_lock);
spin_unlock_irq(&curtask->sig->siglock);
for (i = 0; i < curtask->files->max_fds; i++ ) {
if (curtask->files->fd[i]) close(i);
......@@ -238,20 +238,20 @@ int request_module(const char * module_name)
}
/* Block everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
waitpid_result = waitpid(pid, NULL, __WCLONE);
atomic_dec(&kmod_concurrent);
/* Allow signals again.. */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (waitpid_result != pid) {
printk(KERN_ERR "request_module[%s]: waitpid(%d,...) failed, errno %d\n",
......
......@@ -155,16 +155,8 @@ int max_queued_signals = 1024;
(((sig) != SIGCHLD) && \
((t)->sig->action[(sig)-1].sa.sa_handler == SIG_IGN))
void __init signals_init(void)
{
sigqueue_cachep =
kmem_cache_create("sigqueue",
sizeof(struct sigqueue),
__alignof__(struct sigqueue),
0, NULL, NULL);
if (!sigqueue_cachep)
panic("signals_init(): cannot create sigqueue SLAB cache");
}
static int
__send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
......@@ -250,23 +242,6 @@ flush_signals(struct task_struct *t)
flush_sigqueue(&t->pending);
}
static inline void __remove_thread_group(struct task_struct *tsk, struct signal_struct *sig)
{
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
}
void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig)
{
write_lock_irq(&tasklist_lock);
spin_lock(&tsk->sig->siglock);
__remove_thread_group(tsk, sig);
spin_unlock(&tsk->sig->siglock);
write_unlock_irq(&tasklist_lock);
}
/*
* This function expects the tasklist_lock write-locked.
*/
......@@ -279,9 +254,9 @@ void __exit_sighand(struct task_struct *tsk)
if (!atomic_read(&sig->count))
BUG();
spin_lock(&sig->siglock);
spin_lock(&tsk->sigmask_lock);
if (atomic_dec_and_test(&sig->count)) {
__remove_thread_group(tsk, sig);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
tsk->sig = NULL;
spin_unlock(&sig->siglock);
flush_sigqueue(&sig->shared_pending);
......@@ -295,14 +270,13 @@ void __exit_sighand(struct task_struct *tsk)
wake_up_process(sig->group_exit_task);
sig->group_exit_task = NULL;
}
__remove_thread_group(tsk, sig);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
tsk->sig = NULL;
spin_unlock(&sig->siglock);
}
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
spin_unlock(&tsk->sigmask_lock);
}
void exit_sighand(struct task_struct *tsk)
......@@ -361,11 +335,11 @@ block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
unsigned long flags;
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->notifier_mask = mask;
current->notifier_data = priv;
current->notifier = notifier;
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
/* Notify the system that blocking has ended. */
......@@ -375,11 +349,11 @@ unblock_all_signals(void)
{
unsigned long flags;
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
current->notifier = NULL;
current->notifier_data = NULL;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
......@@ -435,7 +409,7 @@ static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *in
* Dequeue a signal and return the element to the caller, which is
* expected to free it.
*
* All callers have to hold the siglock and the sigmask_lock.
* All callers have to hold the siglock.
*/
int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t *info)
......@@ -492,7 +466,7 @@ static int rm_from_queue(int sig, struct sigpending *s)
* Remove signal sig from t->pending.
* Returns 1 if sig was found.
*
* All callers must be holding t->sigmask_lock.
* All callers must be holding the siglock.
*/
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
......@@ -661,7 +635,7 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
*
* NOTE! we rely on the previous spin_lock to
* lock interrupts for us! We can only be called with
* "sigmask_lock" held, and the local interrupt must
* "siglock" held, and the local interrupt must
* have been disabled when that got acquired!
*
* No need to set need_resched since signal event passing
......@@ -700,7 +674,7 @@ static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
}
static int
__send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int shared)
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int shared)
{
int ret;
......@@ -712,22 +686,21 @@ __send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int shared
#endif
ret = -EINVAL;
if (sig < 0 || sig > _NSIG)
goto out_nolock;
goto out;
/* The somewhat baroque permissions check... */
ret = -EPERM;
if (bad_signal(sig, info, t))
goto out_nolock;
goto out;
ret = security_ops->task_kill(t, info, sig);
if (ret)
goto out_nolock;
goto out;
/* The null signal is a permissions and process existence probe.
No signal is actually delivered. Same goes for zombies. */
ret = 0;
if (!sig || !t->sig)
goto out_nolock;
goto out;
spin_lock(&t->sigmask_lock);
handle_stop_signal(sig, t);
/* Optimize away the signal, if it's a signal that can be
......@@ -754,8 +727,6 @@ __send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int shared
ret = send_signal(sig, info, &t->sig->shared_pending);
}
out:
spin_unlock(&t->sigmask_lock);
out_nolock:
return ret;
}
......@@ -768,40 +739,31 @@ int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
unsigned long int flags;
int ret;
spin_lock_irqsave(&t->sigmask_lock, flags);
if (t->sig == NULL) {
spin_unlock_irqrestore(&t->sigmask_lock, flags);
return -ESRCH;
}
spin_lock_irqsave(&t->sig->siglock, flags);
if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
sigdelset(&t->blocked, sig);
recalc_sigpending_tsk(t);
spin_unlock_irqrestore(&t->sigmask_lock, flags);
ret = __send_sig_info(sig, info, t);
spin_unlock_irqrestore(&t->sig->siglock, flags);
return send_sig_info(sig, info, t);
return ret;
}
static int
__force_sig_info(int sig, struct task_struct *t)
specific_force_sig_info(int sig, struct task_struct *t)
{
unsigned long int flags;
spin_lock_irqsave(&t->sigmask_lock, flags);
if (t->sig == NULL) {
spin_unlock_irqrestore(&t->sigmask_lock, flags);
if (!t->sig)
return -ESRCH;
}
if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
sigdelset(&t->blocked, sig);
recalc_sigpending_tsk(t);
spin_unlock_irqrestore(&t->sigmask_lock, flags);
return __send_sig_info(sig, (void *)2, t, 0);
return specific_send_sig_info(sig, (void *)2, t, 0);
}
#define can_take_signal(p, sig) \
......@@ -820,7 +782,7 @@ int load_balance_thread_group(struct task_struct *p, int sig,
* then deliver it.
*/
if (can_take_signal(p, sig))
return __send_sig_info(sig, info, p, 0);
return specific_send_sig_info(sig, info, p, 0);
/*
* Otherwise try to find a suitable thread.
......@@ -851,14 +813,14 @@ int load_balance_thread_group(struct task_struct *p, int sig,
break;
continue;
}
ret = __send_sig_info(sig, info, tmp, 0);
ret = specific_send_sig_info(sig, info, tmp, 0);
return ret;
}
/*
* No suitable thread was found - put the signal
* into the shared-pending queue.
*/
return __send_sig_info(sig, info, p, 1);
return specific_send_sig_info(sig, info, p, 1);
}
int __broadcast_thread_group(struct task_struct *p, int sig)
......@@ -869,7 +831,7 @@ int __broadcast_thread_group(struct task_struct *p, int sig)
int err = 0;
for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid)
err = __force_sig_info(sig, tmp);
err = specific_force_sig_info(sig, tmp);
return err;
}
......@@ -887,19 +849,16 @@ struct task_struct * find_unblocked_thread(struct task_struct *p, int signr)
return NULL;
}
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
static int
__send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
struct task_struct *t;
unsigned long flags;
int ret = 0;
if (!p)
BUG();
if (!p->sig)
#if CONFIG_SMP
if (!spin_is_locked(&p->sig->siglock))
BUG();
spin_lock_irqsave(&p->sig->siglock, flags);
#endif
/* not a thread group - normal signal behavior */
if (thread_group_empty(p) || !sig)
goto out_send;
......@@ -925,7 +884,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
/* Does any of the threads unblock the signal? */
t = find_unblocked_thread(p, sig);
if (!t) {
ret = __send_sig_info(sig, info, p, 1);
ret = specific_send_sig_info(sig, info, p, 1);
goto out_unlock;
}
if (sig_kernel_broadcast(sig) || sig_kernel_coredump(sig)) {
......@@ -936,9 +895,21 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
/* must not happen */
BUG();
out_send:
ret = __send_sig_info(sig, info, p, 0);
ret = specific_send_sig_info(sig, info, p, 0);
out_unlock:
return ret;
}
/*
 * Send a signal to a task (or its thread group), taking the per-group
 * siglock for the caller.
 *
 * This is the locked entry point: it acquires p->sig->siglock with
 * interrupts disabled (irqsave, so it is usable from any context) and
 * delegates the actual delivery policy to __send_sig_info(), which
 * requires the siglock to be held.
 *
 * Returns the result of __send_sig_info() (0 on success, negative
 * errno on failure).
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&p->sig->siglock, flags);
ret = __send_sig_info(sig, info, p);
spin_unlock_irqrestore(&p->sig->siglock, flags);
return ret;
}
......@@ -1096,9 +1067,8 @@ kill_proc(pid_t pid, int sig, int priv)
* Joy. Or not. Pthread wants us to wake up every thread
* in our parent group.
*/
static inline void wake_up_parent(struct task_struct *p)
static inline void __wake_up_parent(struct task_struct *p)
{
unsigned long flags;
struct task_struct *parent = p->parent, *tsk = parent;
/*
......@@ -1108,14 +1078,13 @@ static inline void wake_up_parent(struct task_struct *p)
wake_up_interruptible(&tsk->wait_chldexit);
return;
}
spin_lock_irqsave(&parent->sig->siglock, flags);
do {
wake_up_interruptible(&tsk->wait_chldexit);
tsk = next_thread(tsk);
if (tsk->sig != parent->sig)
BUG();
} while (tsk != parent);
spin_unlock_irqrestore(&parent->sig->siglock, flags);
}
/*
......@@ -1125,6 +1094,7 @@ static inline void wake_up_parent(struct task_struct *p)
void do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
unsigned long flags;
int why, status;
if (sig == -1)
......@@ -1164,8 +1134,10 @@ void do_notify_parent(struct task_struct *tsk, int sig)
info.si_code = why;
info.si_status = status;
send_sig_info(sig, &info, tsk->parent);
wake_up_parent(tsk);
spin_lock_irqsave(&tsk->parent->sig->siglock, flags);
__send_sig_info(sig, &info, tsk->parent);
__wake_up_parent(tsk);
spin_unlock_irqrestore(&tsk->parent->sig->siglock, flags);
}
......@@ -1196,18 +1168,12 @@ int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
unsigned long signr = 0;
struct k_sigaction *ka;
local_irq_disable();
if (current->sig->shared_pending.head) {
spin_lock(&current->sig->siglock);
spin_lock_irq(&current->sig->siglock);
if (current->sig->shared_pending.head)
signr = dequeue_signal(&current->sig->shared_pending, mask, info);
spin_unlock(&current->sig->siglock);
}
if (!signr) {
spin_lock(&current->sigmask_lock);
if (!signr)
signr = dequeue_signal(&current->pending, mask, info);
spin_unlock(&current->sigmask_lock);
}
local_irq_enable();
spin_unlock_irq(&current->sig->siglock);
if (!signr)
break;
......@@ -1345,7 +1311,7 @@ sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
goto out;
sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
old_set = current->blocked;
error = 0;
......@@ -1365,15 +1331,15 @@ sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
current->blocked = new_set;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (error)
goto out;
if (oset)
goto set_old;
} else if (oset) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
old_set = current->blocked;
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
set_old:
error = -EFAULT;
......@@ -1393,9 +1359,9 @@ long do_sigpending(void *set, unsigned long sigsetsize)
if (sigsetsize > sizeof(sigset_t))
goto out;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
sigandsets(&pending, &current->blocked, &current->pending.signal);
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
error = -EFAULT;
if (!copy_to_user(set, &pending, sigsetsize))
......@@ -1503,7 +1469,6 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
}
spin_lock_irq(&current->sig->siglock);
spin_lock(&current->sigmask_lock);
sig = dequeue_signal(&current->sig->shared_pending, &these, &info);
if (!sig)
sig = dequeue_signal(&current->pending, &these, &info);
......@@ -1520,14 +1485,12 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
current->real_blocked = current->blocked;
sigandsets(&current->blocked, &current->blocked, &these);
recalc_sigpending();
spin_unlock(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->state = TASK_INTERRUPTIBLE;
timeout = schedule_timeout(timeout);
spin_lock_irq(&current->sig->siglock);
spin_lock(&current->sigmask_lock);
sig = dequeue_signal(&current->sig->shared_pending, &these, &info);
if (!sig)
sig = dequeue_signal(&current->pending, &these, &info);
......@@ -1536,7 +1499,6 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
recalc_sigpending();
}
}
spin_unlock(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (sig) {
......@@ -1593,7 +1555,7 @@ sys_tkill(int pid, int sig)
error = -ESRCH;
if (p) {
spin_lock_irq(&p->sig->siglock);
error = __send_sig_info(sig, &info, p, 0);
error = specific_send_sig_info(sig, &info, p, 0);
spin_unlock_irq(&p->sig->siglock);
}
read_unlock(&tasklist_lock);
......@@ -1660,10 +1622,8 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
sig == SIGCHLD ||
sig == SIGWINCH ||
sig == SIGURG))) {
spin_lock_irq(&current->sigmask_lock);
if (rm_sig_from_queue(sig, current))
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
}
}
......@@ -1756,7 +1716,7 @@ sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
goto out;
new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
old_set = current->blocked.sig[0];
error = 0;
......@@ -1776,7 +1736,7 @@ sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
}
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (error)
goto out;
if (oset)
......@@ -1838,13 +1798,13 @@ sys_ssetmask(int newmask)
{
int old;
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
old = current->blocked.sig[0];
siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
sigmask(SIGSTOP)));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
return old;
}
......@@ -1881,3 +1841,15 @@ sys_pause(void)
}
#endif /* HAVE_ARCH_SYS_PAUSE */
/*
 * Boot-time initialization of the signal subsystem: create the SLAB
 * cache used to allocate struct sigqueue entries for queued signals.
 *
 * A failure here is unrecoverable so early in boot, hence the panic().
 */
void __init signals_init(void)
{
sigqueue_cachep =
kmem_cache_create("sigqueue",
sizeof(struct sigqueue),
__alignof__(struct sigqueue),
0, NULL, NULL);
if (!sigqueue_cachep)
panic("signals_init(): cannot create sigqueue SLAB cache");
}
......@@ -219,9 +219,9 @@ int freeze_processes(void)
/* FIXME: smp problem here: we may not access other process' flags
without locking */
p->flags |= PF_FREEZE;
spin_lock_irqsave(&p->sigmask_lock, flags);
spin_lock_irqsave(&p->sig->siglock, flags);
signal_wake_up(p);
spin_unlock_irqrestore(&p->sigmask_lock, flags);
spin_unlock_irqrestore(&p->sig->siglock, flags);
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
......
......@@ -91,10 +91,10 @@ static int __pdflush(struct pdflush_work *my_work)
strcpy(current->comm, "pdflush");
/* interruptible sleep, so block all signals */
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, 0);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
current->flags |= PF_FLUSHER;
my_work->fn = NULL;
......
......@@ -226,21 +226,21 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
sigallow |= sigmask(SIGQUIT);
}
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
*oldset = current->blocked;
siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
unsigned long irqflags;
spin_lock_irqsave(&current->sigmask_lock, irqflags);
spin_lock_irqsave(&current->sig->siglock, irqflags);
current->blocked = *oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
spin_unlock_irqrestore(&current->sig->siglock, irqflags);
}
/*
......
......@@ -992,10 +992,10 @@ rpciod(void *ptr)
daemonize();
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
siginitsetinv(&current->blocked, sigmask(SIGKILL));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
strcpy(current->comm, "rpciod");
......@@ -1050,9 +1050,9 @@ rpciod_killall(void)
}
}
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
/*
......@@ -1128,9 +1128,9 @@ rpciod_down(void)
}
interruptible_sleep_on(&rpciod_killer);
}
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
out:
up(&rpciod_sema);
MOD_DEC_USE_COUNT;
......
......@@ -213,9 +213,9 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
}
if (!port) {
spin_lock_irqsave(&current->sigmask_lock, flags);
spin_lock_irqsave(&current->sig->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sigmask_lock, flags);
spin_unlock_irqrestore(&current->sig->siglock, flags);
}
return error;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment