Commit 0bbfcaff authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq/core changes for v3.4 from Ingo Molnar

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq: Remove paranoid warnons and bogus fixups
  genirq: Flush the irq thread on synchronization
  genirq: Get rid of unnecessary IRQTF_DIED flag
  genirq: No need to check IRQTF_DIED before stopping a thread handler
  genirq: Get rid of unnecessary irqaction field in task_struct
  genirq: Fix incorrect check for forced IRQ thread handler
  softirq: Reduce invoke_softirq() code duplication
  genirq: Fix long-term regression in genirq irq_set_irq_type() handling
  x86-32/irq: Don't switch to irq stack for a user-mode irq
parents 5928a2b6 e04268b0
@@ -100,13 +100,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 	irqctx->tinfo.task = curctx->tinfo.task;
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
-	/*
-	 * Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context.
-	 */
-	irqctx->tinfo.preempt_count =
-		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -196,7 +191,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	if (unlikely(!desc))
 		return false;
 
-	if (!execute_on_irq_stack(overflow, desc, irq)) {
+	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		desc->handle_irq(irq, desc);
...
@@ -1319,6 +1319,11 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_GENERIC_HARDIRQS
+	/* IRQ handler threads */
+	unsigned irq_thread:1;
+#endif
+
 	pid_t pid;
 	pid_t tgid;
@@ -1427,11 +1432,6 @@ struct task_struct {
 	 * mempolicy */
 	spinlock_t alloc_lock;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	struct irqaction *irqaction;
-#endif
-
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
...
@@ -935,8 +935,6 @@ void do_exit(long code)
 		schedule();
 	}
 
-	exit_irq_thread();
-
 	exit_signals(tsk);  /* sets PF_EXITING */
 
 	/*
 	 * tsk->flags are checked in the futex code to protect against
@@ -945,6 +943,8 @@ void do_exit(long code)
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
+	exit_irq_thread();
+
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 			current->comm, task_pid_nr(current),
...
@@ -61,8 +61,7 @@ int irq_set_irq_type(unsigned int irq, unsigned int type)
 		return -EINVAL;
 
 	type &= IRQ_TYPE_SENSE_MASK;
-	if (type != IRQ_TYPE_NONE)
-		ret = __irq_set_trigger(desc, irq, type);
+	ret = __irq_set_trigger(desc, irq, type);
 	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
...
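For context, irq_set_irq_type() is what platform and driver code calls to select a trigger mode for an interrupt line; with the change above, IRQ_TYPE_NONE is no longer silently skipped but is passed down to __irq_set_trigger() like any other type. A minimal, hypothetical usage sketch (the helper name and the trigger choice are invented for illustration, not taken from this commit):

#include <linux/irq.h>

/* Hypothetical helper: pick a trigger mode before the line is requested. */
static int example_configure_irq(unsigned int irq)
{
	/* Any IRQ_TYPE_* value is valid here; after the fix above,
	 * IRQ_TYPE_NONE also reaches __irq_set_trigger() instead of
	 * being ignored. */
	return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
}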
@@ -60,7 +60,7 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 	 * device interrupt, so no irq storm is lurking. If the
 	 * RUNTHREAD bit is already set, nothing to do.
 	 */
-	if (test_bit(IRQTF_DIED, &action->thread_flags) ||
+	if ((action->thread->flags & PF_EXITING) ||
 	    test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 		return;
@@ -110,6 +110,18 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 	 * threads_oneshot untouched and runs the thread another time.
 	 */
 	desc->threads_oneshot |= action->thread_mask;
+
+	/*
+	 * We increment the threads_active counter in case we wake up
+	 * the irq thread. The irq thread decrements the counter when
+	 * it returns from the handler or in the exit path and wakes
+	 * up waiters which are stuck in synchronize_irq() when the
+	 * active count becomes zero. synchronize_irq() is serialized
+	 * against this code (hard irq handler) via IRQS_INPROGRESS
+	 * like the finalize_oneshot() code. See comment above.
	 */
+	atomic_inc(&desc->threads_active);
+
 	wake_up_process(action->thread);
 }
...
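The comment added above describes the producer side of the threads_active accounting; the consumer side is synchronize_irq(), which sleeps until the count drops back to zero. A rough sketch of that waiting step, assuming only the threads_active counter and wait_for_threads waitqueue from struct irq_desc (an illustration of the idea, not the exact kernel code):

#include <linux/irq.h>
#include <linux/wait.h>

/* Sketch only: irq_wake_thread() bumps threads_active before waking the
 * handler thread, and the thread drops it again via wake_threads_waitq(),
 * so a synchronize_irq()-style caller can simply wait for zero. */
static void example_flush_irq_threads(struct irq_desc *desc)
{
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));
}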
@@ -20,14 +20,12 @@ extern bool noirqdebug;
 /*
  * Bits used by threaded handlers:
  * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED      - handler thread died
  * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
  * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
  * IRQTF_FORCED_THREAD  - irq action is force threaded
  */
 enum {
 	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
 	IRQTF_WARNED,
 	IRQTF_AFFINITY,
 	IRQTF_FORCED_THREAD,
...
@@ -759,6 +759,13 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 	return ret;
 }
 
+static void wake_threads_waitq(struct irq_desc *desc)
+{
+	if (atomic_dec_and_test(&desc->threads_active) &&
+	    waitqueue_active(&desc->wait_for_threads))
+		wake_up(&desc->wait_for_threads);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -771,57 +778,41 @@ static int irq_thread(void *data)
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 			struct irqaction *action);
-	int wake;
 
-	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 					&action->thread_flags))
 		handler_fn = irq_forced_thread_fn;
 	else
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irqaction = action;
+	current->irq_thread = 1;
 
 	while (!irq_wait_for_interrupt(action)) {
+		irqreturn_t action_ret;
 
 		irq_thread_check_affinity(desc, action);
 
-		atomic_inc(&desc->threads_active);
+		action_ret = handler_fn(desc, action);
+		if (!noirqdebug)
+			note_interrupt(action->irq, desc, action_ret);
 
-		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
-			/*
-			 * CHECKME: We might need a dedicated
-			 * IRQ_THREAD_PENDING flag here, which
-			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQS_PENDING should be fine as it
-			 * retriggers the interrupt itself --- tglx
-			 */
-			desc->istate |= IRQS_PENDING;
-			raw_spin_unlock_irq(&desc->lock);
-		} else {
-			irqreturn_t action_ret;
-
-			raw_spin_unlock_irq(&desc->lock);
-			action_ret = handler_fn(desc, action);
-			if (!noirqdebug)
-				note_interrupt(action->irq, desc, action_ret);
-		}
-
-		wake = atomic_dec_and_test(&desc->threads_active);
-
-		if (wake && waitqueue_active(&desc->wait_for_threads))
-			wake_up(&desc->wait_for_threads);
+		wake_threads_waitq(desc);
 	}
 
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
-
 	/*
-	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * This is the regular exit path. __free_irq() is stopping the
+	 * thread via kthread_stop() after calling
+	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+	 * oneshot mask bit can be set. We cannot verify that as we
+	 * cannot touch the oneshot mask at this point anymore as
+	 * __setup_irq() might have given out currents thread_mask
+	 * again.
+	 *
+	 * Clear irq_thread. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irqaction = NULL;
+	current->irq_thread = 0;
 	return 0;
 }
@@ -832,27 +823,28 @@ void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
 	struct irq_desc *desc;
+	struct irqaction *action;
 
-	if (!tsk->irqaction)
+	if (!tsk->irq_thread)
 		return;
 
+	action = kthread_data(tsk);
+
 	printk(KERN_ERR
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
 
-	desc = irq_to_desc(tsk->irqaction->irq);
+	desc = irq_to_desc(action->irq);
 
 	/*
-	 * Prevent a stale desc->threads_oneshot. Must be called
-	 * before setting the IRQTF_DIED flag.
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
 	 */
-	irq_finalize_oneshot(desc, tsk->irqaction, true);
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
 
-	/*
-	 * Set the THREAD DIED flag to prevent further wakeups of the
-	 * soon to be gone threaded handler.
-	 */
-	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
 }
 
 static void irq_setup_forced_threading(struct irqaction *new)
@@ -1135,8 +1127,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		struct task_struct *t = new->thread;
 
 		new->thread = NULL;
-		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
-			kthread_stop(t);
+		kthread_stop(t);
 		put_task_struct(t);
 	}
 out_mput:
@@ -1246,8 +1237,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif
 
 	if (action->thread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(action->thread);
+		kthread_stop(action->thread);
 		put_task_struct(action->thread);
 	}
 
...
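For reference, the kernel threads being created and stopped here belong to drivers that use the threaded-interrupt API; with IRQTF_DIED gone, __setup_irq() and __free_irq() above can call kthread_stop() unconditionally. A minimal, hypothetical driver-side sketch of that API (the example_dev structure and all example_* names are invented for illustration, not taken from this commit):

#include <linux/interrupt.h>

struct example_dev {
	int irq;			/* hypothetical device, for illustration */
};

/* Hard IRQ handler: runs in hard interrupt context, should only
 * check/ack the device and then defer the real work. */
static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;		/* kick the threaded handler */
}

/* Threaded handler: runs in the irq_thread() kthread shown above,
 * so it is allowed to sleep. */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(struct example_dev *dev)
{
	return request_threaded_irq(dev->irq, example_hardirq,
				    example_thread_fn, IRQF_ONESHOT,
				    "example-dev", dev);
}

static void example_release(struct example_dev *dev)
{
	free_irq(dev->irq, dev);	/* waits for the handler, stops the kthread */
}

In this scheme, free_irq() first waits for a running threaded handler and then stops its kthread, which is the "regular exit path" described in the irq_thread() comment above.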
@@ -310,31 +310,21 @@ void irq_enter(void)
 	__irq_enter();
 }
 
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
+	if (!force_irqthreads) {
+#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
 		__do_softirq();
-	else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
-		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
-}
 #else
-static inline void invoke_softirq(void)
-{
-	if (!force_irqthreads)
 		do_softirq();
-	else {
+#endif
+	} else {
 		__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
 		wakeup_softirqd();
 		__local_bh_enable(SOFTIRQ_OFFSET);
 	}
 }
-#endif
 
 /*
  * Exit an interrupt context. Process softirqs if needed and possible:
...