Commit c93fd526 authored by Ahmed S. Darwish and committed by Thomas Gleixner

PCI/MSI: Move mask and unmask helpers to msi.h

The upcoming support for per device MSI interrupt domains needs to share
some of the inline helpers with the MSI implementation.

Move them to the header file.
Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20221111122014.640052354@linutronix.de
parent db537dd3
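
For illustration only (not part of this commit): with pci_msi_update_mask() made non-static and the inline mask/unmask helpers visible through msi.h, a separate compilation unit, for example the per device MSI domain code this series prepares for, could implement irq_chip mask/unmask callbacks roughly as sketched below. The callback names are hypothetical; only pci_msi_mask()/pci_msi_unmask() and the per-vector bit computation come from the code being moved.

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include "msi.h"

/* Hypothetical irq_chip callbacks living in another file that includes msi.h */
static void example_irq_mask_msi(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	/* Mask only this vector's bit in the MSI Mask Bits register */
	pci_msi_mask(desc, BIT(data->irq - desc->irq));
}

static void example_irq_unmask_msi(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
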
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -16,7 +16,7 @@
 static int pci_msi_enable = 1;
 int pci_msi_ignore_mask;
 
-static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
+void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
 {
 	raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
 	unsigned long flags;
@@ -32,65 +32,6 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 s
 	raw_spin_unlock_irqrestore(lock, flags);
 }
 
-static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
-{
-	pci_msi_update_mask(desc, 0, mask);
-}
-
-static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
-{
-	pci_msi_update_mask(desc, mask, 0);
-}
-
-static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
-{
-	return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
-}
-
-/*
- * This internal function does not flush PCI writes to the device. All
- * users must ensure that they read from the device before either assuming
- * that the device state is up to date, or returning out of this file.
- * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
- */
-static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
-{
-	void __iomem *desc_addr = pci_msix_desc_addr(desc);
-
-	if (desc->pci.msi_attrib.can_mask)
-		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
-}
-
-static inline void pci_msix_mask(struct msi_desc *desc)
-{
-	desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
-	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
-	/* Flush write to device */
-	readl(desc->pci.mask_base);
-}
-
-static inline void pci_msix_unmask(struct msi_desc *desc)
-{
-	desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
-}
-
-static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
-{
-	if (desc->pci.msi_attrib.is_msix)
-		pci_msix_mask(desc);
-	else
-		pci_msi_mask(desc, mask);
-}
-
-static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
-{
-	if (desc->pci.msi_attrib.is_msix)
-		pci_msix_unmask(desc);
-	else
-		pci_msi_unmask(desc, mask);
-}
-
 /**
  * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
  * @data:	pointer to irqdata associated to that interrupt
--- a/drivers/pci/msi/msi.h
+++ b/drivers/pci/msi/msi.h
@@ -8,21 +8,67 @@
 int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void pci_msi_teardown_msi_irqs(struct pci_dev *dev);
 
-#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
-int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
-void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
-#else
-static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+/* Mask/unmask helpers */
+void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set);
+
+static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
 {
-	WARN_ON_ONCE(1);
-	return -ENODEV;
+	pci_msi_update_mask(desc, 0, mask);
 }
 
-static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
 {
-	WARN_ON_ONCE(1);
+	pci_msi_update_mask(desc, mask, 0);
 }
+
+static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+	return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
+}
+
+/*
+ * This internal function does not flush PCI writes to the device. All
+ * users must ensure that they read from the device before either assuming
+ * that the device state is up to date, or returning out of this file.
+ * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
+ */
+static inline void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
+{
+	void __iomem *desc_addr = pci_msix_desc_addr(desc);
+
+	if (desc->pci.msi_attrib.can_mask)
+		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
+static inline void pci_msix_mask(struct msi_desc *desc)
+{
+	desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
+	/* Flush write to device */
+	readl(desc->pci.mask_base);
+}
+
+static inline void pci_msix_unmask(struct msi_desc *desc)
+{
+	desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
+}
+
+static inline void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
+{
+	if (desc->pci.msi_attrib.is_msix)
+		pci_msix_mask(desc);
+	else
+		pci_msi_mask(desc, mask);
+}
+
+static inline void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
+{
+	if (desc->pci.msi_attrib.is_msix)
+		pci_msix_unmask(desc);
+	else
+		pci_msi_unmask(desc, mask);
+}
-#endif
 
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
@@ -37,3 +83,20 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
 		return 0xffffffff;
 	return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
 }
+
+/* Legacy (!IRQDOMAIN) fallbacks */
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
+int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
+#else
+static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	WARN_ON_ONCE(1);
+	return -ENODEV;
+}
+
+static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+	WARN_ON_ONCE(1);
+}
+#endif
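
For illustration only (not part of this commit): for non-MSI-X descriptors the mask argument of pci_msi_mask()/pci_msi_unmask() is a bitmap with one bit per vector. msi_multi_mask(), visible as context in the hunk above, computes that bitmap as (1 << nr_vectors) - 1; e.g. multi_cap = 2 encodes four vectors and yields 0xf. The helper and wrapper names below are hypothetical; only the pci_msi_mask()/pci_msi_unmask() plus msi_multi_mask() pattern comes from the MSI code itself.

#include <linux/msi.h>
#include "msi.h"

/* Hypothetical sketch: mask, and later unmask, every vector of a
 * multi-vector MSI descriptor with a single register update. */
static void example_quiesce_msi(struct msi_desc *desc)
{
	/* msi_multi_mask(): one bit per vector, e.g. multi_cap = 2 -> 0xf */
	pci_msi_mask(desc, msi_multi_mask(desc));
}

static void example_resume_msi(struct msi_desc *desc)
{
	pci_msi_unmask(desc, msi_multi_mask(desc));
}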