Commit aefb058b authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Ingo Molnar:
 "Affinity fixes and a nested threaded IRQ handling fix."

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq: Always force thread affinity
  irq: Set CPU affinity right on thread creation
  genirq: Provide means to retrigger parent
parents 37ea95a9 04aa530e
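
To see how the pieces below fit together, here is a minimal sketch of how a driver for a slow-bus interrupt controller (an I2C GPIO expander, say) might wire up its nested child interrupts once this series is in. All foo_* names, the irq_base scheme and the child count are illustrative, not part of this commit:

#include <linux/interrupt.h>
#include <linux/irq.h>

#define FOO_NR_IRQS	8		/* illustrative number of children */

struct foo {
	int irq_base;			/* first Linux irq number of the children */
	unsigned long mask_cache;	/* cached mask register image, illustrative */
};

static struct irq_chip foo_irq_chip;	/* mask/unmask over the slow bus, see below */

static void foo_setup_child_irqs(struct foo *foo, int parent_irq)
{
	int i;

	for (i = 0; i < FOO_NR_IRQS; i++) {
		int child = foo->irq_base + i;

		irq_set_chip_data(child, foo);
		irq_set_chip_and_handler(child, &foo_irq_chip,
					 handle_simple_irq);
		/* Child handlers run in the parent's irq thread. */
		irq_set_nested_thread(child, 1);
		/*
		 * New with this pull: remember the parent, so a software
		 * resend of a pending child retriggers the parent irq.
		 */
		irq_set_parent(child, parent_irq);
	}
}

The parent interrupt itself is requested with request_threaded_irq(); its threaded handler demultiplexes the children (see the sketch after the handle_nested_irq hunk below).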
@@ -392,6 +392,15 @@ static inline void irq_move_masked_irq(struct irq_data *data) { }
extern int no_irq_affinity;

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
...
@@ -11,6 +11,8 @@
struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;

/**
 * struct irq_desc - interrupt descriptor
 * @irq_data:		per irq and chip data passed down to chip functions
@@ -65,6 +67,7 @@ struct irq_desc {
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	int			parent_irq;
	struct module		*owner;
	const char		*name;
} ____cacheline_internodealigned_in_smp;
...
@@ -272,6 +272,7 @@ void handle_nested_irq(unsigned int irq)
	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
...
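
The added line clears IRQS_REPLAY and IRQS_WAITING so that a nested interrupt replayed through the resend path is marked as consumed once its handler has run. For context, a hedged sketch of the parent's threaded handler that hands each pending child to handle_nested_irq(); the status-register read and the foo_* names are hypothetical:

unsigned long foo_read_irq_status(struct foo *foo);	/* slow-bus read, not shown */

static irqreturn_t foo_parent_thread(int irq, void *data)
{
	struct foo *foo = data;
	unsigned long pending = foo_read_irq_status(foo);
	int bit;

	for_each_set_bit(bit, &pending, FOO_NR_IRQS)
		/* Runs the child's threaded handler in this thread. */
		handle_nested_irq(foo->irq_base + bit);

	return IRQ_HANDLED;
}

The parent would be requested with something like request_threaded_irq(parent_irq, NULL, foo_parent_thread, IRQF_ONESHOT, "foo", foo), which is what makes the children genuinely nested.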
@@ -616,6 +616,22 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
@@ -716,6 +732,7 @@ static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;
@@ -730,9 +747,17 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
@@ -833,6 +858,8 @@ static int irq_thread(void *data)
	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;
@@ -936,6 +963,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NO_BALANCE this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
...
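
Two of the three fixes meet in the hunks above: __setup_irq() now tells a freshly created handler thread to pick up the interrupt's affinity unconditionally (hence the mask-pointer check in irq_thread_check_affinity()), and irq_thread() applies it before its first wait. The practical effect, sketched below with illustrative foo_* names, is that a driver which pins an interrupt before requesting it gets the handler thread on the right CPU(s) from the start, instead of wherever the scheduler happens to place it until the interrupt's affinity next changes:

static irqreturn_t foo_thread_fn(int irq, void *data);	/* illustrative */

static int foo_probe_irq(struct foo *foo, int irq)
{
	/* Pin the interrupt to CPU 2 (illustrative choice). */
	irq_set_affinity(irq, cpumask_of(2));

	/*
	 * With the change above, the irq thread created here starts out
	 * on CPU 2 as well, rather than only moving there after the
	 * first interrupt or the next affinity change.
	 */
	return request_threaded_irq(irq, NULL, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}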
@@ -74,6 +74,14 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
		if (!desc->irq_data.chip->irq_retrigger ||
		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
			/*
			 * If the interrupt has a parent irq and runs
			 * in the thread context of the parent irq,
			 * retrigger the parent.
			 */
			if (desc->parent_irq &&
			    irq_settings_is_nested_thread(desc))
				irq = desc->parent_irq;
			/* Set it pending and activate the softirq: */
			set_bit(irq, irqs_resend);
			tasklet_schedule(&resend_tasklet);
...
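
This resend change is what makes nested setups work when the child's irq_chip cannot retrigger in hardware: if a child interrupt arrived while it was disabled, check_irq_resend() now kicks the parent interrupt, whose thread then demultiplexes and delivers the child. A sketch of the kind of child irq_chip this is aimed at, with no .irq_retrigger callback; the foo_* callbacks and the mask_cache field are illustrative and would be flushed to the device over the slow bus elsewhere:

static void foo_irq_mask(struct irq_data *d)
{
	struct foo *foo = irq_data_get_irq_chip_data(d);

	foo->mask_cache |= BIT(d->irq - foo->irq_base);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct foo *foo = irq_data_get_irq_chip_data(d);

	foo->mask_cache &= ~BIT(d->irq - foo->irq_base);
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	/*
	 * No .irq_retrigger: with CONFIG_HARDIRQS_SW_RESEND, a pending but
	 * masked child is replayed in software, and with this change the
	 * replay retriggers the parent irq instead of the child itself.
	 */
};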