Commit 3a6384ba authored by Bjorn Helgaas's avatar Bjorn Helgaas

Merge branch 'pci/host-vmd' into next

* pci/host-vmd:
  x86/PCI: Add driver for Intel Volume Management Device (VMD)
  PCI/AER: Use 32 bit PCI domain numbers
  x86/PCI: Allow DMA ops specific to a PCI domain
  irqdomain: Export irq_domain_set_info() for module use
  genirq/MSI: Relax msi_domain_alloc() to support parentless MSI irqdomains
parents 47235841 185a383a
...@@ -8216,6 +8216,12 @@ S: Maintained ...@@ -8216,6 +8216,12 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/host-generic-pci.txt F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
F: drivers/pci/host/pci-host-generic.c F: drivers/pci/host/pci-host-generic.c
PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Keith Busch <keith.busch@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
F: arch/x86/pci/vmd.c
PCIE DRIVER FOR ST SPEAR13XX PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com> M: Pratyush Anand <pratyush.anand@gmail.com>
L: linux-pci@vger.kernel.org L: linux-pci@vger.kernel.org
......
...@@ -2665,6 +2665,19 @@ config PMC_ATOM ...@@ -2665,6 +2665,19 @@ config PMC_ATOM
def_bool y def_bool y
depends on PCI depends on PCI
config VMD
	depends on PCI_MSI
	tristate "Volume Management Device Driver"
	# Lowercase "n" is the tristate value; uppercase "N" would be a
	# reference to a (nonexistent) symbol named N.
	default n
	---help---
	  Adds support for the Intel Volume Management Device (VMD). VMD is a
	  secondary PCI host bridge that allows PCI Express root ports,
	  and devices attached to them, to be removed from the default
	  PCI domain and placed within the VMD domain. This provides
	  more bus resources than are otherwise possible with a
	  single domain. If you know your system provides one of these and
	  has devices attached to it, say Y; if you are not sure, say N.
source "net/Kconfig" source "net/Kconfig"
source "drivers/Kconfig" source "drivers/Kconfig"
......
...@@ -10,6 +10,16 @@ struct dev_archdata { ...@@ -10,6 +10,16 @@ struct dev_archdata {
#endif #endif
}; };
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
/*
 * Registration record mapping a PCI domain (segment) number to the DMA
 * operations that devices enumerated in that domain should use — e.g. the
 * VMD host bridge installs its own dma_map_ops for devices behind it.
 */
struct dma_domain {
	struct list_head node;		/* linkage on the global dma_domain_list */
	struct dma_map_ops *dma_ops;	/* ops applied to devices in this domain */
	int domain_nr;			/* PCI domain number this entry covers */
};
/* Register/unregister a per-domain DMA-ops override (see arch/x86/pci). */
void add_dma_domain(struct dma_domain *domain);
void del_dma_domain(struct dma_domain *domain);
#endif
struct pdev_archdata { struct pdev_archdata {
}; };
......
...@@ -129,6 +129,11 @@ struct irq_alloc_info { ...@@ -129,6 +129,11 @@ struct irq_alloc_info {
unsigned long uv_offset; unsigned long uv_offset;
char *uv_name; char *uv_name;
}; };
#endif
#if IS_ENABLED(CONFIG_VMD)
struct {
struct msi_desc *desc;
};
#endif #endif
}; };
}; };
......
...@@ -23,6 +23,8 @@ obj-y += bus_numa.o ...@@ -23,6 +23,8 @@ obj-y += bus_numa.o
obj-$(CONFIG_AMD_NB) += amd_bus.o obj-$(CONFIG_AMD_NB) += amd_bus.o
obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
obj-$(CONFIG_VMD) += vmd.o
ifeq ($(CONFIG_PCI_DEBUG),y) ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG EXTRA_CFLAGS += -DDEBUG
endif endif
...@@ -641,6 +641,43 @@ unsigned int pcibios_assign_all_busses(void) ...@@ -641,6 +641,43 @@ unsigned int pcibios_assign_all_busses(void)
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0; return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
} }
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
/*
 * Per-PCI-domain DMA ops: a driver (such as VMD) can register a
 * struct dma_domain so that every device subsequently added in that PCI
 * domain uses the registered dma_map_ops instead of the platform default.
 */
static LIST_HEAD(dma_domain_list);
/* Protects dma_domain_list against concurrent add/del/lookup. */
static DEFINE_SPINLOCK(dma_domain_list_lock);

/* Register @domain so its dma_ops are applied to newly added devices. */
void add_dma_domain(struct dma_domain *domain)
{
	spin_lock(&dma_domain_list_lock);
	list_add(&domain->node, &dma_domain_list);
	spin_unlock(&dma_domain_list_lock);
}
EXPORT_SYMBOL_GPL(add_dma_domain);

/*
 * Unregister @domain.  Devices whose archdata.dma_ops was already set by
 * set_dma_domain_ops() keep their pointer; only future lookups are affected.
 */
void del_dma_domain(struct dma_domain *domain)
{
	spin_lock(&dma_domain_list_lock);
	list_del(&domain->node);
	spin_unlock(&dma_domain_list_lock);
}
EXPORT_SYMBOL_GPL(del_dma_domain);

/*
 * If a dma_domain has been registered for @pdev's PCI domain, point the
 * device at that domain's dma_ops.  First match wins; devices in domains
 * with no registration are left with the default ops.
 */
static void set_dma_domain_ops(struct pci_dev *pdev)
{
	struct dma_domain *domain;

	spin_lock(&dma_domain_list_lock);
	list_for_each_entry(domain, &dma_domain_list, node) {
		if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
			pdev->dev.archdata.dma_ops = domain->dma_ops;
			break;
		}
	}
	spin_unlock(&dma_domain_list_lock);
}
#else
/* No per-domain DMA ops support configured: nothing to do. */
static void set_dma_domain_ops(struct pci_dev *pdev) {}
#endif
int pcibios_add_device(struct pci_dev *dev) int pcibios_add_device(struct pci_dev *dev)
{ {
struct setup_data *data; struct setup_data *data;
...@@ -670,6 +707,7 @@ int pcibios_add_device(struct pci_dev *dev) ...@@ -670,6 +707,7 @@ int pcibios_add_device(struct pci_dev *dev)
pa_data = data->next; pa_data = data->next;
iounmap(data); iounmap(data);
} }
set_dma_domain_ops(dev);
return 0; return 0;
} }
......
This diff is collapsed.
...@@ -41,12 +41,12 @@ struct aer_error_inj { ...@@ -41,12 +41,12 @@ struct aer_error_inj {
u32 header_log1; u32 header_log1;
u32 header_log2; u32 header_log2;
u32 header_log3; u32 header_log3;
u16 domain; u32 domain;
}; };
struct aer_error { struct aer_error {
struct list_head list; struct list_head list;
u16 domain; u32 domain;
unsigned int bus; unsigned int bus;
unsigned int devfn; unsigned int devfn;
int pos_cap_err; int pos_cap_err;
...@@ -74,7 +74,7 @@ static LIST_HEAD(pci_bus_ops_list); ...@@ -74,7 +74,7 @@ static LIST_HEAD(pci_bus_ops_list);
/* Protect einjected and pci_bus_ops_list */ /* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock); static DEFINE_SPINLOCK(inject_lock);
static void aer_error_init(struct aer_error *err, u16 domain, static void aer_error_init(struct aer_error *err, u32 domain,
unsigned int bus, unsigned int devfn, unsigned int bus, unsigned int devfn,
int pos_cap_err) int pos_cap_err)
{ {
...@@ -86,7 +86,7 @@ static void aer_error_init(struct aer_error *err, u16 domain, ...@@ -86,7 +86,7 @@ static void aer_error_init(struct aer_error *err, u16 domain,
} }
/* inject_lock must be held before calling */ /* inject_lock must be held before calling */
static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
unsigned int devfn) unsigned int devfn)
{ {
struct aer_error *err; struct aer_error *err;
...@@ -106,7 +106,7 @@ static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) ...@@ -106,7 +106,7 @@ static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
int domain = pci_domain_nr(dev->bus); int domain = pci_domain_nr(dev->bus);
if (domain < 0) if (domain < 0)
return NULL; return NULL;
return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); return __find_aer_error(domain, dev->bus->number, dev->devfn);
} }
/* inject_lock must be held before calling */ /* inject_lock must be held before calling */
...@@ -196,7 +196,7 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where, ...@@ -196,7 +196,7 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
domain = pci_domain_nr(bus); domain = pci_domain_nr(bus);
if (domain < 0) if (domain < 0)
goto out; goto out;
err = __find_aer_error((u16)domain, bus->number, devfn); err = __find_aer_error(domain, bus->number, devfn);
if (!err) if (!err)
goto out; goto out;
...@@ -228,7 +228,7 @@ static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, ...@@ -228,7 +228,7 @@ static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
domain = pci_domain_nr(bus); domain = pci_domain_nr(bus);
if (domain < 0) if (domain < 0)
goto out; goto out;
err = __find_aer_error((u16)domain, bus->number, devfn); err = __find_aer_error(domain, bus->number, devfn);
if (!err) if (!err)
goto out; goto out;
...@@ -329,7 +329,7 @@ static int aer_inject(struct aer_error_inj *einj) ...@@ -329,7 +329,7 @@ static int aer_inject(struct aer_error_inj *einj)
u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0; u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
int ret = 0; int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn);
if (!dev) if (!dev)
return -ENODEV; return -ENODEV;
rpdev = pcie_find_root_port(dev); rpdev = pcie_find_root_port(dev);
......
...@@ -1058,6 +1058,7 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, ...@@ -1058,6 +1058,7 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
__irq_set_handler(virq, handler, 0, handler_name); __irq_set_handler(virq, handler, 0, handler_name);
irq_set_handler_data(virq, handler_data); irq_set_handler_data(virq, handler_data);
} }
EXPORT_SYMBOL(irq_domain_set_info);
/** /**
* irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
......
...@@ -109,9 +109,11 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, ...@@ -109,9 +109,11 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (irq_find_mapping(domain, hwirq) > 0) if (irq_find_mapping(domain, hwirq) > 0)
return -EEXIST; return -EEXIST;
if (domain->parent) {
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
if (ret < 0) if (ret < 0)
return ret; return ret;
}
for (i = 0; i < nr_irqs; i++) { for (i = 0; i < nr_irqs; i++) {
ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment