Commit c36e33e2 authored by Linus Torvalds

Merge tag 'irq-urgent-2021-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
 "A set of fixes for the interrupt subsystem

  Core code:

    - A regression fix for the Open Firmware interrupt mapping code where
      an interrupt-controller property in a node caused an interrupt-map
      property in the same node to be ignored.

  Interrupt chip drivers:

    - Work around a limitation in the SiFive PLIC interrupt chip, which
      silently ignores an EOI when the interrupt line is masked.

   - Provide the missing mask/unmask implementation for the CSKY MP
     interrupt controller.

  PCI/MSI:

    - Prevent a use-after-free when PCI/MSI interrupts are released by
      destroying the sysfs entries before freeing the memory that is
      accessed in the sysfs show() function.

    - Implement a mask quirk for the Nvidia ION AHCI chip, which does not
      advertise MSI masking capability despite implementing it. Even
      worse, the chip comes out of reset with all MSI entries masked, and
      due to the missing masking capability they never get unmasked.

    - Move the check which prevents accessing the MSI[X] mask registers
      for XEN back into the low level accessors. The recent consolidation
      missed that these accessors can be invoked from places which do not
      have that check, which broke XEN. Move the check back to the
      original place instead of sprinkling tons of copies all over the
      code"

* tag 'irq-urgent-2021-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  of/irq: Don't ignore interrupt-controller when interrupt-map failed
  irqchip/sifive-plic: Fixup EOI failed when masked
  irqchip/csky-mpintc: Fixup mask/unmask implementation
  PCI/MSI: Destroy sysfs before freeing entries
  PCI: Add MSI masking quirk for Nvidia ION AHCI
  PCI/MSI: Deal with devices lying about their MSI mask capability
  PCI/MSI: Move non-mask check back into low level accessors
parents 218cc8b8 979292af
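
The PCI/MSI changes in the diff below all hinge on one idea: decide once, at entry setup time, whether a given MSI/MSI-X entry can be masked at all, record that in the new per-descriptor can_mask bit (which replaces the old maskbit flag), and let the low level accessors test only that bit. The following sketch is an illustrative summary of that decision, not literal kernel code; the helper name example_msi_can_mask() is made up for this note, while pci_msi_ignore_mask, PCI_MSI_FLAGS_MASKBIT, PCI_DEV_FLAGS_HAS_MSI_MASKING and the is_virtual notion are the real symbols used in the hunks below.

#include <linux/pci.h>
#include <linux/msi.h>

/*
 * Illustrative sketch only: the real logic lives in msi_setup_entry()
 * and msix_setup_entries() in the diff below; this helper does not
 * exist in the kernel.
 */
static bool example_msi_can_mask(struct pci_dev *dev, u16 msi_control,
				 bool is_msix, bool is_virtual)
{
	/* Xen (and anything else setting pci_msi_ignore_mask) never masks */
	if (pci_msi_ignore_mask)
		return false;

	/* Virtual MSI-X vectors have no vector control word to write */
	if (is_msix)
		return !is_virtual;

	/* Quirked devices implement MSI masking but do not advertise it */
	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
		msi_control |= PCI_MSI_FLAGS_MASKBIT;

	return !!(msi_control & PCI_MSI_FLAGS_MASKBIT);
}
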
@@ -78,7 +78,7 @@ static void csky_mpintc_handler(struct pt_regs *regs)
 		readl_relaxed(reg_base + INTCL_RDYIR));
 }
 
-static void csky_mpintc_enable(struct irq_data *d)
+static void csky_mpintc_unmask(struct irq_data *d)
 {
 	void __iomem *reg_base = this_cpu_read(intcl_reg);
 
@@ -87,7 +87,7 @@ static void csky_mpintc_enable(struct irq_data *d)
 	writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
 }
 
-static void csky_mpintc_disable(struct irq_data *d)
+static void csky_mpintc_mask(struct irq_data *d)
 {
 	void __iomem *reg_base = this_cpu_read(intcl_reg);
 
@@ -164,8 +164,8 @@ static int csky_irq_set_affinity(struct irq_data *d,
 static struct irq_chip csky_irq_chip = {
 	.name		= "C-SKY SMP Intc",
 	.irq_eoi	= csky_mpintc_eoi,
-	.irq_enable	= csky_mpintc_enable,
-	.irq_disable	= csky_mpintc_disable,
+	.irq_unmask	= csky_mpintc_unmask,
+	.irq_mask	= csky_mpintc_mask,
 	.irq_set_type	= csky_mpintc_set_type,
 #ifdef CONFIG_SMP
 	.irq_set_affinity = csky_irq_set_affinity,
...
@@ -163,7 +163,13 @@ static void plic_irq_eoi(struct irq_data *d)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
 
-	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+	if (irqd_irq_masked(d)) {
+		plic_irq_unmask(d);
+		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+		plic_irq_mask(d);
+	} else {
+		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+	}
 }
 
 static struct irq_chip plic_chip = {
...
@@ -161,9 +161,10 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 		 * if it is then we are done, unless there is an
 		 * interrupt-map which takes precedence.
 		 */
+		bool intc = of_property_read_bool(ipar, "interrupt-controller");
+
 		imap = of_get_property(ipar, "interrupt-map", &imaplen);
-		if (imap == NULL &&
-		    of_property_read_bool(ipar, "interrupt-controller")) {
+		if (imap == NULL && intc) {
 			pr_debug(" -> got it !\n");
 			return 0;
 		}
@@ -244,8 +245,20 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 			pr_debug(" -> imaplen=%d\n", imaplen);
 		}
 
-		if (!match)
+		if (!match) {
+			if (intc) {
+				/*
+				 * The PASEMI Nemo is a known offender, so
+				 * let's only warn for anyone else.
+				 */
+				WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
+				     "%pOF interrupt-map failed, using interrupt-controller\n",
+				     ipar);
+				return 0;
+			}
+
 			goto fail;
+		}
 
 		/*
 		 * Successfully parsed an interrrupt-map translation; copy new
...
@@ -148,6 +148,9 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
 	raw_spinlock_t *lock = &desc->dev->msi_lock;
 	unsigned long flags;
 
+	if (!desc->msi_attrib.can_mask)
+		return;
+
 	raw_spin_lock_irqsave(lock, flags);
 	desc->msi_mask &= ~clear;
 	desc->msi_mask |= set;
@@ -181,6 +184,7 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
 {
 	void __iomem *desc_addr = pci_msix_desc_addr(desc);
 
-	writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+	if (desc->msi_attrib.can_mask)
+		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
@@ -200,23 +204,17 @@ static inline void pci_msix_unmask(struct msi_desc *desc)
 static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
 {
-	if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
-		return;
-
 	if (desc->msi_attrib.is_msix)
 		pci_msix_mask(desc);
-	else if (desc->msi_attrib.maskbit)
+	else
 		pci_msi_mask(desc, mask);
 }
 
 static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
 {
-	if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
-		return;
-
 	if (desc->msi_attrib.is_msix)
 		pci_msix_unmask(desc);
-	else if (desc->msi_attrib.maskbit)
+	else
 		pci_msi_unmask(desc, mask);
 }
@@ -370,6 +368,11 @@ static void free_msi_irqs(struct pci_dev *dev)
 			for (i = 0; i < entry->nvec_used; i++)
 				BUG_ON(irq_has_action(entry->irq + i));
 
+	if (dev->msi_irq_groups) {
+		msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
+		dev->msi_irq_groups = NULL;
+	}
+
 	pci_msi_teardown_msi_irqs(dev);
 
 	list_for_each_entry_safe(entry, tmp, msi_list, list) {
@@ -381,11 +384,6 @@ static void free_msi_irqs(struct pci_dev *dev)
 		list_del(&entry->list);
 		free_msi_entry(entry);
 	}
-
-	if (dev->msi_irq_groups) {
-		msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
-		dev->msi_irq_groups = NULL;
-	}
 }
 
 static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -479,12 +477,16 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 		goto out;
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+	/* Lies, damned lies, and MSIs */
+	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
+		control |= PCI_MSI_FLAGS_MASKBIT;
+
 	entry->msi_attrib.is_msix	= 0;
 	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
 	entry->msi_attrib.is_virtual	= 0;
 	entry->msi_attrib.entry_nr	= 0;
-	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
+	entry->msi_attrib.can_mask	= !pci_msi_ignore_mask &&
+					  !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
 	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
@@ -495,7 +497,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 
 	/* Save the initial mask status */
-	if (entry->msi_attrib.maskbit)
+	if (entry->msi_attrib.can_mask)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
 
 out:
@@ -639,10 +641,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->msi_attrib.is_virtual =
 			entry->msi_attrib.entry_nr >= vec_count;
 
+		entry->msi_attrib.can_mask	= !pci_msi_ignore_mask &&
+						  !entry->msi_attrib.is_virtual;
+
 		entry->msi_attrib.default_irq	= dev->irq;
 		entry->mask_base		= base;
 
-		if (!entry->msi_attrib.is_virtual) {
+		if (entry->msi_attrib.can_mask) {
 			addr = pci_msix_desc_addr(entry);
 			entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 		}
...
@@ -5851,3 +5851,9 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303,
 			 pci_fixup_pericom_acs_store_forward);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303,
 			 pci_fixup_pericom_acs_store_forward);
+
+static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
+{
+	pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
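
A note on the quirk above: final fixups run when the device is added, before a driver is bound, so by the time msi_setup_entry() reads PCI_MSI_FLAGS the flag is already set and the OR of PCI_MSI_FLAGS_MASKBIT (the "Lies, damned lies, and MSIs" hunk above) marks the entries as maskable. If another device turns out to behave the same way, a one-line fixup of the same shape should be enough; the device ID below is purely hypothetical.

/*
 * Hypothetical example only: device ID 0x1234 is made up. A real quirk
 * would name the affected device, as the ION AHCI fixup above does.
 */
static void example_msi_masking_fixup(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x1234, example_msi_masking_fixup);
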
@@ -148,7 +148,7 @@ struct msi_desc {
 				u8	is_msix		: 1;
 				u8	multiple	: 3;
 				u8	multi_cap	: 3;
-				u8	maskbit		: 1;
+				u8	can_mask	: 1;
 				u8	is_64		: 1;
 				u8	is_virtual	: 1;
 				u16	entry_nr;
...
@@ -233,6 +233,8 @@ enum pci_dev_flags {
 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
 	/* Don't use Relaxed Ordering for TLPs directed at this device */
 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+	/* Device does honor MSI masking despite saying otherwise */
+	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
 };
 
 enum pci_irq_reroute_variant {
...
@@ -529,10 +529,10 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
 
 	/*
 	 * Checking the first MSI descriptor is sufficient. MSIX supports
-	 * masking and MSI does so when the maskbit is set.
+	 * masking and MSI does so when the can_mask attribute is set.
 	 */
 	desc = first_msi_entry(dev);
-	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+	return desc->msi_attrib.is_msix || desc->msi_attrib.can_mask;
 }
 
 int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
...