Commit d59b553f authored by Marc Zyngier

Merge branch irq/lpi-resend into irq/irqchip-next

* irq/lpi-resend:
  : .
  : Patch series from James Gowans, working around an issue with
  : GICv3 LPIs that can fire concurrently on multiple CPUs.
  : .
  irqchip/gic-v3-its: Enable RESEND_WHEN_IN_PROGRESS for LPIs
  genirq: Allow fasteoi handler to resend interrupts on concurrent handling
  genirq: Expand doc for PENDING and REPLAY flags
  genirq: Use BIT() for the IRQD_* state flags
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 2b384e01 8f4b5895
...@@ -3585,6 +3585,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ...@@ -3585,6 +3585,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irqd = irq_get_irq_data(virq + i); irqd = irq_get_irq_data(virq + i);
irqd_set_single_target(irqd); irqd_set_single_target(irqd);
irqd_set_affinity_on_activate(irqd); irqd_set_affinity_on_activate(irqd);
irqd_set_resend_when_in_progress(irqd);
pr_debug("ID:%d pID:%d vID:%d\n", pr_debug("ID:%d pID:%d vID:%d\n",
(int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i - its_dev->event_map.lpi_base),
(int)(hwirq + i), virq + i); (int)(hwirq + i), virq + i);
...@@ -4523,6 +4524,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq ...@@ -4523,6 +4524,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irq_domain_set_hwirq_and_chip(domain, virq + i, i, irq_domain_set_hwirq_and_chip(domain, virq + i, i,
irqchip, vm->vpes[i]); irqchip, vm->vpes[i]);
set_bit(i, bitmap); set_bit(i, bitmap);
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
} }
if (err) { if (err) {
......
...@@ -223,32 +223,35 @@ struct irq_data { ...@@ -223,32 +223,35 @@ struct irq_data {
* irq_chip::irq_set_affinity() when deactivated. * irq_chip::irq_set_affinity() when deactivated.
* IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
* irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set. * irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
* IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress in which
* case it must be resent at the next available opportunity.
*/ */
enum { enum {
IRQD_TRIGGER_MASK = 0xf, IRQD_TRIGGER_MASK = 0xf,
IRQD_SETAFFINITY_PENDING = (1 << 8), IRQD_SETAFFINITY_PENDING = BIT(8),
IRQD_ACTIVATED = (1 << 9), IRQD_ACTIVATED = BIT(9),
IRQD_NO_BALANCING = (1 << 10), IRQD_NO_BALANCING = BIT(10),
IRQD_PER_CPU = (1 << 11), IRQD_PER_CPU = BIT(11),
IRQD_AFFINITY_SET = (1 << 12), IRQD_AFFINITY_SET = BIT(12),
IRQD_LEVEL = (1 << 13), IRQD_LEVEL = BIT(13),
IRQD_WAKEUP_STATE = (1 << 14), IRQD_WAKEUP_STATE = BIT(14),
IRQD_MOVE_PCNTXT = (1 << 15), IRQD_MOVE_PCNTXT = BIT(15),
IRQD_IRQ_DISABLED = (1 << 16), IRQD_IRQ_DISABLED = BIT(16),
IRQD_IRQ_MASKED = (1 << 17), IRQD_IRQ_MASKED = BIT(17),
IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_IRQ_INPROGRESS = BIT(18),
IRQD_WAKEUP_ARMED = (1 << 19), IRQD_WAKEUP_ARMED = BIT(19),
IRQD_FORWARDED_TO_VCPU = (1 << 20), IRQD_FORWARDED_TO_VCPU = BIT(20),
IRQD_AFFINITY_MANAGED = (1 << 21), IRQD_AFFINITY_MANAGED = BIT(21),
IRQD_IRQ_STARTED = (1 << 22), IRQD_IRQ_STARTED = BIT(22),
IRQD_MANAGED_SHUTDOWN = (1 << 23), IRQD_MANAGED_SHUTDOWN = BIT(23),
IRQD_SINGLE_TARGET = (1 << 24), IRQD_SINGLE_TARGET = BIT(24),
IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_DEFAULT_TRIGGER_SET = BIT(25),
IRQD_CAN_RESERVE = (1 << 26), IRQD_CAN_RESERVE = BIT(26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27), IRQD_MSI_NOMASK_QUIRK = BIT(27),
IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28),
IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), IRQD_AFFINITY_ON_ACTIVATE = BIT(29),
IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30), IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30),
IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31),
}; };
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
...@@ -448,6 +451,16 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d) ...@@ -448,6 +451,16 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d)
return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
} }
/*
 * irqd_set_resend_when_in_progress - Opt this interrupt into the resend
 * workaround: if it fires again while a previous occurrence is still being
 * handled on another CPU, mark it IRQS_PENDING and resend it via
 * check_irq_resend() once the in-progress handling completes, instead of
 * dropping it. Set at allocation time by the irqchip driver (here: GICv3
 * ITS for LPIs and vLPIs, which can race with affinity changes).
 */
static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
}
/*
 * irqd_needs_resend_when_in_progress - True if this interrupt was marked
 * with IRQD_RESEND_WHEN_IN_PROGRESS. Queried by the fasteoi flow handler
 * when irq_may_run() refuses entry (handler already running elsewhere),
 * to decide whether the occurrence must be flagged IRQS_PENDING for a
 * later resend rather than silently ignored.
 */
static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
}
#undef __irqd_to_state #undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
......
...@@ -692,8 +692,16 @@ void handle_fasteoi_irq(struct irq_desc *desc) ...@@ -692,8 +692,16 @@ void handle_fasteoi_irq(struct irq_desc *desc)
raw_spin_lock(&desc->lock); raw_spin_lock(&desc->lock);
if (!irq_may_run(desc)) /*
* When an affinity change races with IRQ handling, the next interrupt
* can arrive on the new CPU before the original CPU has completed
* handling the previous one - it may need to be resent.
*/
if (!irq_may_run(desc)) {
if (irqd_needs_resend_when_in_progress(&desc->irq_data))
desc->istate |= IRQS_PENDING;
goto out; goto out;
}
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
...@@ -715,6 +723,12 @@ void handle_fasteoi_irq(struct irq_desc *desc) ...@@ -715,6 +723,12 @@ void handle_fasteoi_irq(struct irq_desc *desc)
cond_unmask_eoi_irq(desc, chip); cond_unmask_eoi_irq(desc, chip);
/*
* When the race described above happens this will resend the interrupt.
*/
if (unlikely(desc->istate & IRQS_PENDING))
check_irq_resend(desc, false);
raw_spin_unlock(&desc->lock); raw_spin_unlock(&desc->lock);
return; return;
out: out:
......
...@@ -133,6 +133,8 @@ static const struct irq_bit_descr irqdata_states[] = { ...@@ -133,6 +133,8 @@ static const struct irq_bit_descr irqdata_states[] = {
BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX), BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND), BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
}; };
static const struct irq_bit_descr irqdesc_states[] = { static const struct irq_bit_descr irqdesc_states[] = {
......
...@@ -47,9 +47,12 @@ enum { ...@@ -47,9 +47,12 @@ enum {
* detection * detection
* IRQS_POLL_INPROGRESS - polling in progress * IRQS_POLL_INPROGRESS - polling in progress
* IRQS_ONESHOT - irq is not unmasked in primary handler * IRQS_ONESHOT - irq is not unmasked in primary handler
* IRQS_REPLAY - irq is replayed * IRQS_REPLAY - irq has been resent and will not be resent
* again until the handler has run and cleared
* this flag.
* IRQS_WAITING - irq is waiting * IRQS_WAITING - irq is waiting
* IRQS_PENDING - irq is pending and replayed later * IRQS_PENDING - irq needs to be resent and should be resent
* at the next available opportunity.
* IRQS_SUSPENDED - irq is suspended * IRQS_SUSPENDED - irq is suspended
* IRQS_NMI - irq line is used to deliver NMIs * IRQS_NMI - irq line is used to deliver NMIs
* IRQS_SYSFS - descriptor has been added to sysfs * IRQS_SYSFS - descriptor has been added to sysfs
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment