Commit e5558d1a authored by Bjorn Helgaas

Merge branches 'dma-api', 'pci/virtualization', 'pci/msi', 'pci/misc' and 'pci/resource' into next

* dma-api:
  iommu/exynos: Remove unnecessary "&" from function pointers
  DMA-API: Update dma_pool_create() and dma_pool_alloc() descriptions
  DMA-API: Fix duplicated word in DMA-API-HOWTO.txt
  DMA-API: Capitalize "CPU" consistently
  sh/PCI: Pass GAPSPCI_DMA_BASE CPU & bus address to dma_declare_coherent_memory()
  DMA-API: Change dma_declare_coherent_memory() CPU address to phys_addr_t
  DMA-API: Clarify physical/bus address distinction

* pci/virtualization:
  PCI: Mark RTL8110SC INTx masking as broken

* pci/msi:
  PCI/MSI: Remove pci_enable_msi_block()

* pci/misc:
  PCI: Remove pcibios_add_platform_entries()
  s390/pci: use pdev->dev.groups for attribute creation
  PCI: Move Open Firmware devspec attribute to PCI common code

* pci/resource:
  PCI: Add resource allocation comments
  PCI: Simplify __pci_assign_resource() coding style
  PCI: Change pbus_size_mem() return values to be more conventional
  PCI: Restrict 64-bit prefetchable bridge windows to 64-bit resources
  PCI: Support BAR sizes up to 8GB
  resources: Clarify sanity check message
  PCI: Don't add disabled subtractive decode bus resources
  PCI: Don't print anything while decoding is disabled
  PCI: Don't set BAR to zero if dma_addr_t is too small
  PCI: Don't convert BAR address to resource if dma_addr_t is too small
  PCI: Reject BAR above 4GB if dma_addr_t is too small
  PCI: Fail safely if we can't handle BARs larger than 4GB
  x86/gart: Tidy messages and add bridge device info
  x86/gart: Replace printk() with pr_info()
  x86/PCI: Move pcibios_assign_resources() annotation to definition
  x86/PCI: Mark ATI SBx00 HPET BAR as IORESOURCE_PCI_FIXED
  x86/PCI: Don't try to move IORESOURCE_PCI_FIXED resources
  x86/PCI: Fix Broadcom CNB20LE unintended sign extension
@@ -16,7 +16,7 @@ To do ISA style DMA you need to include two headers:
 #include <asm/dma.h>
 
 The first is the generic DMA API used to convert virtual addresses to
-physical addresses (see Documentation/DMA-API.txt for details).
+bus addresses (see Documentation/DMA-API.txt for details).
 
 The second contains the routines specific to ISA DMA transfers. Since
 this is not present on all platforms make sure you construct your
@@ -50,7 +50,7 @@ early as possible and not release it until the driver is unloaded.)
 Part III - Address translation
 ------------------------------
 
-To translate the virtual address to a physical use the normal DMA
+To translate the virtual address to a bus address, use the normal DMA
 API. Do _not_ use isa_virt_to_phys() even though it does the same
 thing. The reason for this is that the function isa_virt_to_phys()
 will require a Kconfig dependency to ISA, not just ISA_DMA_API which
......
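To see what the corrected documentation is describing, here is a minimal, hypothetical driver fragment (dev, buf, and len are placeholders, not from this merge) that obtains a bus address through the generic DMA API:

#include <linux/dma-mapping.h>

	/*
	 * buf is a kernel virtual address.  dma_map_single() returns the
	 * bus address the device should use for DMA; the CPU physical
	 * address never appears, since an IOMMU may translate between
	 * the two address spaces.
	 */
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bus))
		return -ENOMEM;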
@@ -168,26 +168,6 @@ struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
 	return NULL;
 }
 
-static ssize_t pci_show_devspec(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct pci_dev *pdev;
-	struct device_node *np;
-
-	pdev = to_pci_dev(dev);
-	np = pci_device_to_OF_node(pdev);
-	if (np == NULL || np->full_name == NULL)
-		return 0;
-	return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-/* Add sysfs properties */
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-	return device_create_file(&pdev->dev, &dev_attr_devspec);
-}
-
 void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
@@ -201,26 +201,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
 	return NULL;
 }
 
-static ssize_t pci_show_devspec(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct pci_dev *pdev;
-	struct device_node *np;
-
-	pdev = to_pci_dev (dev);
-	np = pci_device_to_OF_node(pdev);
-	if (np == NULL || np->full_name == NULL)
-		return 0;
-	return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-/* Add sysfs properties */
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-	return device_create_file(&pdev->dev, &dev_attr_devspec);
-}
-
 /*
  * Reads the interrupt pin to determine if interrupt is use by card.
  * If the interrupt is used, then gets the interrupt line from the
@@ -120,6 +120,8 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
 	return (zdev->fh & (1UL << 31)) ? true : false;
 }
 
+extern const struct attribute_group *zpci_attr_groups[];
+
 /* -----------------------------------------------------------------------------
    Prototypes
   ----------------------------------------------------------------------------- */
@@ -166,10 +168,6 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
 struct zpci_dev *get_zdev(struct pci_dev *);
 struct zpci_dev *get_zdev_by_fid(u32);
 
-/* sysfs */
-int zpci_sysfs_add_device(struct device *);
-void zpci_sysfs_remove_device(struct device *);
-
 /* DMA */
 int zpci_dma_init(void);
 void zpci_dma_exit(void);
@@ -530,11 +530,6 @@ static void zpci_unmap_resources(struct zpci_dev *zdev)
 	}
 }
 
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
-	return zpci_sysfs_add_device(&pdev->dev);
-}
-
 static int __init zpci_irq_init(void)
 {
 	int rc;
@@ -671,6 +666,7 @@ int pcibios_add_device(struct pci_dev *pdev)
 	int i;
 
 	zdev->pdev = pdev;
+	pdev->dev.groups = zpci_attr_groups;
 	zpci_map_resources(zdev);
 
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
@@ -72,36 +72,18 @@ static ssize_t store_recover(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover);
 
-static struct device_attribute *zpci_dev_attrs[] = {
-	&dev_attr_function_id,
-	&dev_attr_function_handle,
-	&dev_attr_pchid,
-	&dev_attr_pfgid,
-	&dev_attr_recover,
+static struct attribute *zpci_dev_attrs[] = {
+	&dev_attr_function_id.attr,
+	&dev_attr_function_handle.attr,
+	&dev_attr_pchid.attr,
+	&dev_attr_pfgid.attr,
+	&dev_attr_recover.attr,
 	NULL,
 };
+
+static struct attribute_group zpci_attr_group = {
+	.attrs = zpci_dev_attrs,
+};
+
+const struct attribute_group *zpci_attr_groups[] = {
+	&zpci_attr_group,
+	NULL,
+};
-
-int zpci_sysfs_add_device(struct device *dev)
-{
-	int i, rc = 0;
-
-	for (i = 0; zpci_dev_attrs[i]; i++) {
-		rc = device_create_file(dev, zpci_dev_attrs[i]);
-		if (rc)
-			goto error;
-	}
-	return 0;
-
-error:
-	while (--i >= 0)
-		device_remove_file(dev, zpci_dev_attrs[i]);
-	return rc;
-}
-
-void zpci_sysfs_remove_device(struct device *dev)
-{
-	int i;
-
-	for (i = 0; zpci_dev_attrs[i]; i++)
-		device_remove_file(dev, zpci_dev_attrs[i]);
-}
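The conversion above follows the standard driver-core idiom: rather than creating and removing each sysfs file by hand, the bus code points the device's ->groups at a NULL-terminated array of attribute_groups before the device is registered, and the core then handles creation, removal, and error unwinding. A sketch of the pattern with illustrative names (the example_* identifiers are hypothetical, not from this merge):

static DEVICE_ATTR_RO(function_id);	/* expects a function_id_show() */

static struct attribute *example_attrs[] = {
	&dev_attr_function_id.attr,
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};

	/* in the bus code, before the device is added: */
	pdev->dev.groups = example_groups;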
@@ -31,6 +31,8 @@
 static void gapspci_fixup_resources(struct pci_dev *dev)
 {
 	struct pci_channel *p = dev->sysdata;
+	struct resource res;
+	struct pci_bus_region region;
 
 	printk(KERN_NOTICE "PCI: Fixing up device %s\n", pci_name(dev));
@@ -50,11 +52,21 @@ static void gapspci_fixup_resources(struct pci_dev *dev)
 		/*
 		 * Redirect dma memory allocations to special memory window.
+		 *
+		 * If this GAPSPCI region were mapped by a BAR, the CPU
+		 * phys_addr_t would be pci_resource_start(), and the bus
+		 * address would be pci_bus_address(pci_resource_start()).
+		 * But apparently there's no BAR mapping it, so we just
+		 * "know" its CPU address is GAPSPCI_DMA_BASE.
 		 */
+		res.start = GAPSPCI_DMA_BASE;
+		res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1;
+		res.flags = IORESOURCE_MEM;
+		pcibios_resource_to_bus(dev->bus, &region, &res);
 		BUG_ON(!dma_declare_coherent_memory(&dev->dev,
-						GAPSPCI_DMA_BASE,
-						GAPSPCI_DMA_BASE,
-						GAPSPCI_DMA_SIZE,
+						res.start,
+						region.start,
+						resource_size(&res),
 						DMA_MEMORY_MAP |
 						DMA_MEMORY_EXCLUSIVE));
 		break;
......
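The comment added in the hunk above is the crux of the whole dma-api branch: a CPU physical address (phys_addr_t) and the bus address a device uses for DMA (dma_addr_t) are distinct and may differ. When a region is mapped by a BAR, both can be read off the struct pci_dev directly; a hedged sketch (pdev and BAR 0 are placeholders):

	phys_addr_t cpu_addr = pci_resource_start(pdev, 0);	/* CPU physical address */
	dma_addr_t bus_addr = pci_bus_address(pdev, 0);		/* address on the PCI bus */

The Dreamcast window has no BAR, so the patch builds a struct resource by hand and converts it with pcibios_resource_to_bus() instead.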
@@ -10,6 +10,8 @@
  *
  * Copyright 2002 Andi Kleen, SuSE Labs.
  */
+#define pr_fmt(fmt) "AGP: " fmt
+
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -75,14 +77,13 @@ static u32 __init allocate_aperture(void)
 	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
 				      aper_size, aper_size);
 	if (!addr) {
-		printk(KERN_ERR
-			"Cannot allocate aperture memory hole (%lx,%uK)\n",
-				addr, aper_size>>10);
+		pr_err("Cannot allocate aperture memory hole [mem %#010lx-%#010lx] (%uKB)\n",
+		       addr, addr + aper_size - 1, aper_size >> 10);
 		return 0;
 	}
 	memblock_reserve(addr, aper_size);
-	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
-			aper_size >> 10, addr);
+	pr_info("Mapping aperture over RAM [mem %#010lx-%#010lx] (%uKB)\n",
+		addr, addr + aper_size - 1, aper_size >> 10);
 	register_nosave_region(addr >> PAGE_SHIFT,
 			       (addr+aper_size) >> PAGE_SHIFT);
@@ -126,10 +127,11 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
 	u64 aper;
 	u32 old_order;
 
-	printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", bus, slot, func);
+	pr_info("pci 0000:%02x:%02x:%02x: AGP bridge\n", bus, slot, func);
 	apsizereg = read_pci_config_16(bus, slot, func, cap + 0x14);
 	if (apsizereg == 0xffffffff) {
-		printk(KERN_ERR "APSIZE in AGP bridge unreadable\n");
+		pr_err("pci 0000:%02x:%02x.%d: APSIZE unreadable\n",
+		       bus, slot, func);
 		return 0;
 	}
@@ -153,16 +155,18 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
 	 * On some sick chips, APSIZE is 0. It means it wants 4G
 	 * so let double check that order, and lets trust AMD NB settings:
 	 */
-	printk(KERN_INFO "Aperture from AGP @ %Lx old size %u MB\n",
-			aper, 32 << old_order);
+	pr_info("pci 0000:%02x:%02x.%d: AGP aperture [bus addr %#010Lx-%#010Lx] (old size %uMB)\n",
+		bus, slot, func, aper, aper + (32ULL << (old_order + 20)) - 1,
+		32 << old_order);
 	if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) {
-		printk(KERN_INFO "Aperture size %u MB (APSIZE %x) is not right, using settings from NB\n",
-				32 << *order, apsizereg);
+		pr_info("pci 0000:%02x:%02x.%d: AGP aperture size %uMB (APSIZE %#x) is not right, using settings from NB\n",
+			bus, slot, func, 32 << *order, apsizereg);
 		*order = old_order;
 	}
 
-	printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
-			aper, 32 << *order, apsizereg);
+	pr_info("pci 0000:%02x:%02x.%d: AGP aperture [bus addr %#010Lx-%#010Lx] (%uMB, APSIZE %#x)\n",
+		bus, slot, func, aper, aper + (32ULL << (*order + 20)) - 1,
+		32 << *order, apsizereg);
 	if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20))
 		return 0;
@@ -218,7 +222,7 @@ static u32 __init search_agp_bridge(u32 *order, int *valid_agp)
 			}
 		}
 	}
-	printk(KERN_INFO "No AGP bridge found\n");
+	pr_info("No AGP bridge found\n");
 
 	return 0;
 }
@@ -310,7 +314,8 @@ void __init early_gart_iommu_check(void)
 	if (e820_any_mapped(aper_base, aper_base + aper_size,
 			    E820_RAM)) {
 		/* reserve it, so we can reuse it in second kernel */
-		printk(KERN_INFO "update e820 for GART\n");
+		pr_info("e820: reserve [mem %#010Lx-%#010Lx] for GART\n",
+			aper_base, aper_base + aper_size - 1);
 		e820_add_region(aper_base, aper_size, E820_RESERVED);
 		update_e820();
 	}
@@ -354,7 +359,7 @@ int __init gart_iommu_hole_init(void)
 	    !early_pci_allowed())
 		return -ENODEV;
 
-	printk(KERN_INFO "Checking aperture...\n");
+	pr_info("Checking aperture...\n");
 
 	if (!fallback_aper_force)
 		agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
@@ -395,8 +400,9 @@ int __init gart_iommu_hole_init(void)
 			aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
 			aper_base <<= 25;
 
-			printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n",
-					node, aper_base, aper_size >> 20);
+			pr_info("Node %d: aperture [bus addr %#010Lx-%#010Lx] (%uMB)\n",
+				node, aper_base, aper_base + aper_size - 1,
+				aper_size >> 20);
 			node++;
 
 			if (!aperture_valid(aper_base, aper_size, 64<<20)) {
@@ -407,9 +413,9 @@ int __init gart_iommu_hole_init(void)
 				if (!no_iommu &&
 				    max_pfn > MAX_DMA32_PFN &&
 				    !printed_gart_size_msg) {
-					printk(KERN_ERR "you are using iommu with agp, but GART size is less than 64M\n");
-					printk(KERN_ERR "please increase GART size in your BIOS setup\n");
-					printk(KERN_ERR "if BIOS doesn't have that option, contact your HW vendor!\n");
+					pr_err("you are using iommu with agp, but GART size is less than 64MB\n");
+					pr_err("please increase GART size in your BIOS setup\n");
+					pr_err("if BIOS doesn't have that option, contact your HW vendor!\n");
 					printed_gart_size_msg = 1;
 				}
 			} else {
@@ -446,13 +452,10 @@ int __init gart_iommu_hole_init(void)
 	    force_iommu ||
 	    valid_agp ||
 	    fallback_aper_force) {
-		printk(KERN_INFO
-			"Your BIOS doesn't leave a aperture memory hole\n");
-		printk(KERN_INFO
-			"Please enable the IOMMU option in the BIOS setup\n");
-		printk(KERN_INFO
-			"This costs you %d MB of RAM\n",
-			32 << fallback_aper_order);
+		pr_info("Your BIOS doesn't leave a aperture memory hole\n");
+		pr_info("Please enable the IOMMU option in the BIOS setup\n");
+		pr_info("This costs you %dMB of RAM\n",
+			32 << fallback_aper_order);
 
 		aper_order = fallback_aper_order;
 		aper_alloc = allocate_aperture();
......
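A note on the mechanism behind this conversion: pr_info()/pr_err() expand to printk() with pr_fmt() applied to the format string, so defining pr_fmt before the first include gives every message in the file a common prefix. A trimmed illustration (not part of the patch itself):

#define pr_fmt(fmt) "AGP: " fmt

#include <linux/printk.h>

	pr_info("Checking aperture...\n");	/* logs "AGP: Checking aperture..." */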
@@ -60,8 +60,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
 	word1 = read_pci_config_16(bus, slot, func, 0xc4);
 	word2 = read_pci_config_16(bus, slot, func, 0xc6);
 	if (word1 != word2) {
-		res.start = (word1 << 16) | 0x0000;
-		res.end = (word2 << 16) | 0xffff;
+		res.start = ((resource_size_t) word1 << 16) | 0x0000;
+		res.end = ((resource_size_t) word2 << 16) | 0xffff;
 		res.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
 		update_res(info, res.start, res.end, res.flags, 0);
 	}
......
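The cast fixes a classic C promotion bug: word2 is a u16, so word2 << 16 is computed in 32-bit int arithmetic. If word2 has bit 15 set, the shift sets bit 31, and assigning the result to a wider resource_size_t sign-extends it. A standalone illustration of the same arithmetic:

	u16 word2 = 0x8000;
	u64 end;

	end = (word2 << 16) | 0xffff;		/* int arithmetic: sign-extends
						   to 0xffffffff8000ffff */
	end = ((u64) word2 << 16) | 0xffff;	/* intended 0x000000008000ffff */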
@@ -6,6 +6,7 @@
 #include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
+#include <asm/hpet.h>
 #include <asm/pci_x86.h>
 
 static void pci_fixup_i450nx(struct pci_dev *d)
@@ -526,6 +527,19 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
 
+#ifdef CONFIG_HPET_TIMER
+static void sb600_hpet_quirk(struct pci_dev *dev)
+{
+	struct resource *r = &dev->resource[1];
+
+	if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
+		r->flags |= IORESOURCE_PCI_FIXED;
+		dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
+#endif
+
 /*
  * Twinhead H12Y needs us to block out a region otherwise we map devices
  * there and any access kills the box.
@@ -271,11 +271,16 @@ static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
 			"BAR %d: reserving %pr (d=%d, p=%d)\n",
 			idx, r, disabled, pass);
 		if (pci_claim_resource(dev, idx) < 0) {
-			/* We'll assign a new address later */
-			pcibios_save_fw_addr(dev,
-					idx, r->start);
-			r->end -= r->start;
-			r->start = 0;
+			if (r->flags & IORESOURCE_PCI_FIXED) {
+				dev_info(&dev->dev, "BAR %d %pR is immovable\n",
+					 idx, r);
+			} else {
+				/* We'll assign a new address later */
+				pcibios_save_fw_addr(dev,
+						idx, r->start);
+				r->end -= r->start;
+				r->start = 0;
+			}
 		}
 	}
 }
@@ -356,6 +361,12 @@ static int __init pcibios_assign_resources(void)
 	return 0;
 }
 
+/**
+ * called in fs_initcall (one below subsys_initcall),
+ * give a chance for motherboard reserve resources
+ */
+fs_initcall(pcibios_assign_resources);
+
 void pcibios_resource_survey_bus(struct pci_bus *bus)
 {
 	dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");
@@ -392,12 +403,6 @@ void __init pcibios_resource_survey(void)
 	ioapic_insert_resources();
 }
 
-/**
- * called in fs_initcall (one below subsys_initcall),
- * give a chance for motherboard reserve resources
- */
-fs_initcall(pcibios_assign_resources);
-
 static const struct vm_operations_struct pci_mmap_ops = {
 	.access = generic_access_phys,
 };
......
@@ -10,13 +10,13 @@
 struct dma_coherent_mem {
 	void		*virt_base;
 	dma_addr_t	device_base;
-	phys_addr_t	pfn_base;
+	unsigned long	pfn_base;
 	int		size;
 	int		flags;
 	unsigned long	*bitmap;
 };
 
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 				dma_addr_t device_addr, size_t size, int flags)
 {
 	void __iomem *mem_base = NULL;
@@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 
 	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 
-	mem_base = ioremap(bus_addr, size);
+	mem_base = ioremap(phys_addr, size);
 	if (!mem_base)
 		goto out;
@@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 
 	dev->dma_mem->virt_base = mem_base;
 	dev->dma_mem->device_base = device_addr;
-	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+	dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
 	dev->dma_mem->size = pages;
 	dev->dma_mem->flags = flags;
@@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	*ret = -ENXIO;
 	if (off < count && user_count <= count - off) {
-		unsigned pfn = mem->pfn_base + start + off;
+		unsigned long pfn = mem->pfn_base + start + off;
 		*ret = remap_pfn_range(vma, vma->vm_start, pfn,
 				       user_count << PAGE_SHIFT,
 				       vma->vm_page_prot);
......
@@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
 /**
  * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
  * @dev: Device to declare coherent memory for
- * @bus_addr: Bus address of coherent memory to be declared
+ * @phys_addr: Physical address of coherent memory to be declared
  * @device_addr: Device address of coherent memory to be declared
  * @size: Size of coherent memory to be declared
  * @flags: Flags
@@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
  * RETURNS:
  * 0 on success, -errno on failure.
  */
-int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 				 dma_addr_t device_addr, size_t size, int flags)
 {
 	void *res;
@@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 	if (!res)
 		return -ENOMEM;
 
-	rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
+	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
 					 flags);
 	if (rc == 0)
 		devres_add(dev, res);
......
@@ -1011,13 +1011,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
 }
 
 static struct iommu_ops exynos_iommu_ops = {
-	.domain_init = &exynos_iommu_domain_init,
-	.domain_destroy = &exynos_iommu_domain_destroy,
-	.attach_dev = &exynos_iommu_attach_device,
-	.detach_dev = &exynos_iommu_detach_device,
-	.map = &exynos_iommu_map,
-	.unmap = &exynos_iommu_unmap,
-	.iova_to_phys = &exynos_iommu_iova_to_phys,
+	.domain_init = exynos_iommu_domain_init,
+	.domain_destroy = exynos_iommu_domain_destroy,
+	.attach_dev = exynos_iommu_attach_device,
+	.detach_dev = exynos_iommu_detach_device,
+	.map = exynos_iommu_map,
+	.unmap = exynos_iommu_unmap,
+	.iova_to_phys = exynos_iommu_iova_to_phys,
 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
 };
......
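The exynos change is purely cosmetic: in C, a function name used as a value decays to a pointer to the function, so the & is redundant. A two-line demonstration:

	static int f(void);

	int (*p1)(void) = f;	/* function designator decays to a pointer */
	int (*p2)(void) = &f;	/* identical value; the '&' adds nothing */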
@@ -878,50 +878,6 @@ int pci_msi_vec_count(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_msi_vec_count);
 
-/**
- * pci_enable_msi_block - configure device's MSI capability structure
- * @dev: device to configure
- * @nvec: number of interrupts to configure
- *
- * Allocate IRQs for a device with the MSI capability.
- * This function returns a negative errno if an error occurs.  If it
- * is unable to allocate the number of interrupts requested, it returns
- * the number of interrupts it might be able to allocate.  If it successfully
- * allocates at least the number of interrupts requested, it returns 0 and
- * updates the @dev's irq member to the lowest new interrupt number; the
- * other interrupt numbers allocated to this device are consecutive.
- */
-int pci_enable_msi_block(struct pci_dev *dev, int nvec)
-{
-	int status, maxvec;
-
-	if (dev->current_state != PCI_D0)
-		return -EINVAL;
-
-	maxvec = pci_msi_vec_count(dev);
-	if (maxvec < 0)
-		return maxvec;
-	if (nvec > maxvec)
-		return maxvec;
-
-	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
-	if (status)
-		return status;
-
-	WARN_ON(!!dev->msi_enabled);
-
-	/* Check whether driver already requested MSI-X irqs */
-	if (dev->msix_enabled) {
-		dev_info(&dev->dev, "can't enable MSI "
-			 "(MSI-X already enabled)\n");
-		return -EINVAL;
-	}
-
-	status = msi_capability_init(dev, nvec);
-	return status;
-}
-EXPORT_SYMBOL(pci_enable_msi_block);
-
 void pci_msi_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *desc;
@@ -1127,14 +1083,45 @@
 **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-	int nvec = maxvec;
+	int nvec;
 	int rc;
 
+	if (dev->current_state != PCI_D0)
+		return -EINVAL;
+
+	WARN_ON(!!dev->msi_enabled);
+
+	/* Check whether driver already requested MSI-X irqs */
+	if (dev->msix_enabled) {
+		dev_info(&dev->dev,
+			 "can't enable MSI (MSI-X already enabled)\n");
+		return -EINVAL;
+	}
+
 	if (maxvec < minvec)
 		return -ERANGE;
 
+	nvec = pci_msi_vec_count(dev);
+	if (nvec < 0)
+		return nvec;
+	else if (nvec < minvec)
+		return -EINVAL;
+	else if (nvec > maxvec)
+		nvec = maxvec;
+
+	do {
+		rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
 	do {
-		rc = pci_enable_msi_block(dev, nvec);
+		rc = msi_capability_init(dev, nvec);
 		if (rc < 0) {
 			return rc;
 		} else if (rc > 0) {
......
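With pci_enable_msi_block() gone, drivers negotiate vector counts through the range/exact helpers instead. A hedged sketch of the replacement pattern in a driver (pdev and the counts are placeholders):

	int nvec;

	/* ask for up to 8 MSI vectors, accepting as few as 1 */
	nvec = pci_enable_msi_range(pdev, 1, 8);
	if (nvec < 0)
		return nvec;	/* no MSI; fall back to legacy INTx */

	/* or, when the driver needs exactly 4 vectors or none at all: */
	if (pci_enable_msi_exact(pdev, 4))
		return -ENODEV;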
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/vgaarb.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
 #include "pci.h"
 
 static int sysfs_initialized;	/* = 0 */
@@ -416,6 +417,20 @@ static ssize_t d3cold_allowed_show(struct device *dev,
 static DEVICE_ATTR_RW(d3cold_allowed);
 #endif
 
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct device_node *np = pci_device_to_OF_node(pdev);
+
+	if (np == NULL || np->full_name == NULL)
+		return 0;
+	return sprintf(buf, "%s", np->full_name);
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
 #ifdef CONFIG_PCI_IOV
 static ssize_t sriov_totalvfs_show(struct device *dev,
 				   struct device_attribute *attr,
@@ -520,6 +535,9 @@ static struct attribute *pci_dev_attrs[] = {
 	&dev_attr_msi_bus.attr,
 #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
 	&dev_attr_d3cold_allowed.attr,
 #endif
+#ifdef CONFIG_OF
+	&dev_attr_devspec.attr,
+#endif
 	NULL,
 };
@@ -1255,11 +1273,6 @@ static struct bin_attribute pcie_config_attr = {
 	.write = pci_write_config,
 };
 
-int __weak pcibios_add_platform_entries(struct pci_dev *dev)
-{
-	return 0;
-}
-
 static ssize_t reset_store(struct device *dev,
 			   struct device_attribute *attr, const char *buf,
 			   size_t count)
@@ -1375,11 +1388,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
 		pdev->rom_attr = attr;
 	}
 
-	/* add platform-specific attributes */
-	retval = pcibios_add_platform_entries(pdev);
-	if (retval)
-		goto err_rom_file;
-
 	/* add sysfs entries for various capabilities */
 	retval = pci_create_capabilities_sysfs(pdev);
 	if (retval)
......
@@ -171,9 +171,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		    struct resource *res, unsigned int pos)
 {
 	u32 l, sz, mask;
+	u64 l64, sz64, mask64;
 	u16 orig_cmd;
 	struct pci_bus_region region, inverted_region;
-	bool bar_too_big = false, bar_disabled = false;
+	bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
 
 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
@@ -226,9 +227,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	}
 
 	if (res->flags & IORESOURCE_MEM_64) {
-		u64 l64 = l;
-		u64 sz64 = sz;
-		u64 mask64 = mask | (u64)~0 << 32;
+		l64 = l;
+		sz64 = sz;
+		mask64 = mask | (u64)~0 << 32;
 
 		pci_read_config_dword(dev, pos + 4, &l);
 		pci_write_config_dword(dev, pos + 4, ~0);
@@ -243,19 +244,22 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		if (!sz64)
 			goto fail;
 
-		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
+		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
+		    sz64 > 0x100000000ULL) {
+			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+			res->start = 0;
+			res->end = 0;
 			bar_too_big = true;
-			goto fail;
+			goto out;
 		}
 
-		if ((sizeof(resource_size_t) < 8) && l) {
-			/* Address above 32-bit boundary; disable the BAR */
-			pci_write_config_dword(dev, pos, 0);
-			pci_write_config_dword(dev, pos + 4, 0);
+		if ((sizeof(dma_addr_t) < 8) && l) {
+			/* Above 32-bit boundary; try to reallocate */
 			res->flags |= IORESOURCE_UNSET;
-			region.start = 0;
-			region.end = sz64;
-			bar_disabled = true;
+			res->start = 0;
+			res->end = sz64;
+			bar_too_high = true;
+			goto out;
 		} else {
 			region.start = l64;
 			region.end = l64 + sz64;
@@ -285,11 +289,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	 * be claimed by the device.
 	 */
 	if (inverted_region.start != region.start) {
-		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
-			 pos, &region.start);
 		res->flags |= IORESOURCE_UNSET;
-		res->end -= res->start;
 		res->start = 0;
+		res->end = region.end - region.start;
+		bar_invalid = true;
 	}
 
 	goto out;
@@ -303,8 +306,15 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
 
 	if (bar_too_big)
-		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
-	if (res->flags && !bar_disabled)
+		dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+			pos, (unsigned long long) sz64);
+	if (bar_too_high)
+		dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
+			 pos, (unsigned long long) l64);
+	if (bar_invalid)
+		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+			 pos, (unsigned long long) region.start);
+	if (res->flags)
 		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
 
 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
@@ -465,7 +475,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
 
 	if (dev->transparent) {
 		pci_bus_for_each_resource(child->parent, res, i) {
-			if (res) {
+			if (res && res->flags) {
 				pci_bus_add_resource(child, res,
 						     PCI_SUBTRACTIVE_DECODE);
 				dev_printk(KERN_DEBUG, &dev->dev,
......
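For readers following the __pci_read_base() changes: the size being guarded against comes from the standard BAR sizing probe — write all-ones to the BAR, read it back, and the lowest writable address bit gives the decode size. A simplified model of that computation (a sketch after the kernel's pci_size(), not a verbatim copy):

	/* maxbase: value read back after writing ~0; mask: address-bit mask */
	static u64 bar_decode_size(u64 maxbase, u64 mask)
	{
		u64 size = mask & maxbase;	/* keep the writable address bits */

		if (!size)
			return 0;		/* BAR not implemented */
		return size & ~(size - 1);	/* lowest set bit == decode size */
	}

A 64-bit BAR whose decode size exceeds 4GB is exactly what a 32-bit dma_addr_t or resource_size_t cannot represent, which is why the new code flags the BAR and continues instead of failing later.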
@@ -2992,6 +2992,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
 			 quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
 			 quirk_broken_intx_masking);
+/*
+ * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
+ * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
+ *
+ * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
+			 quirk_broken_intx_masking);
 
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 			  struct pci_fixup *end)
......
@@ -208,21 +208,42 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
 
 	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
 
-	/* First, try exact prefetching match.. */
+	/*
+	 * First, try exact prefetching match.  Even if a 64-bit
+	 * prefetchable bridge window is below 4GB, we can't put a 32-bit
+	 * prefetchable resource in it because pbus_size_mem() assumes a
+	 * 64-bit window will contain no 32-bit resources.  If we assign
+	 * things differently than they were sized, not everything will fit.
+	 */
 	ret = pci_bus_alloc_resource(bus, res, size, align, min,
-				     IORESOURCE_PREFETCH,
+				     IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
 				     pcibios_align_resource, dev);
+	if (ret == 0)
+		return 0;
 
-	if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) {
-		/*
-		 * That failed.
-		 *
-		 * But a prefetching area can handle a non-prefetching
-		 * window (it will just not perform as well).
-		 */
-		ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+	/*
+	 * If the prefetchable window is only 32 bits wide, we can put
+	 * 64-bit prefetchable resources in it.
+	 */
+	if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
+	     (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
+		ret = pci_bus_alloc_resource(bus, res, size, align, min,
+					     IORESOURCE_PREFETCH,
 					     pcibios_align_resource, dev);
+		if (ret == 0)
+			return 0;
 	}
+
+	/*
+	 * If we didn't find a better match, we can put any memory resource
+	 * in a non-prefetchable window.  If this resource is 32 bits and
+	 * non-prefetchable, the first call already tried the only possibility
+	 * so we don't need to try again.
+	 */
+	if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
+		ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+					     pcibios_align_resource, dev);
+
 	return ret;
 }
......
@@ -16,16 +16,13 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
  * Standard interface
  */
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+				dma_addr_t device_addr, size_t size, int flags);
 
-extern void
-dma_release_declared_memory(struct device *dev);
+void dma_release_declared_memory(struct device *dev);
 
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-				  dma_addr_t device_addr, size_t size);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size);
 #else
 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_coherent(dev, order, vaddr) (0)
......
@@ -8,6 +8,12 @@
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 
+/*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform.
+ * It can be given to a device to use as a DMA source or target.  A CPU cannot
+ * reference a dma_addr_t directly because there may be translation between
+ * its physical address space and the bus address space.
+ */
 struct dma_map_ops {
 	void* (*alloc)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp,
@@ -186,7 +192,7 @@ static inline int dma_get_cache_alignment(void)
 
 #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
 static inline int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 			    dma_addr_t device_addr, size_t size, int flags)
 {
 	return 0;
@@ -217,13 +223,14 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 				  dma_addr_t dma_handle);
 #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+extern int dmam_declare_coherent_memory(struct device *dev,
+					phys_addr_t phys_addr,
 					dma_addr_t device_addr, size_t size,
 					int flags);
 extern void dmam_release_declared_memory(struct device *dev);
 #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
 static inline int dmam_declare_coherent_memory(struct device *dev,
-				dma_addr_t bus_addr, dma_addr_t device_addr,
+				phys_addr_t phys_addr, dma_addr_t device_addr,
 				size_t size, gfp_t gfp)
 {
 	return 0;
......
@@ -1158,7 +1158,6 @@ struct msix_entry {
 
 #ifdef CONFIG_PCI_MSI
 int pci_msi_vec_count(struct pci_dev *dev);
-int pci_enable_msi_block(struct pci_dev *dev, int nvec);
 void pci_msi_shutdown(struct pci_dev *dev);
 void pci_disable_msi(struct pci_dev *dev);
 int pci_msix_vec_count(struct pci_dev *dev);
@@ -1188,8 +1187,6 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
 }
 #else
 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
-{ return -ENOSYS; }
 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
 static inline void pci_disable_msi(struct pci_dev *dev) { }
 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1244,7 +1241,7 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 static inline void pcie_ecrc_get_policy(char *str) { }
 #endif
 
-#define pci_enable_msi(pdev)	pci_enable_msi_block(pdev, 1)
+#define pci_enable_msi(pdev)	pci_enable_msi_exact(pdev, 1)
 
 #ifdef CONFIG_HT_IRQ
 /* The functions a driver should call */
@@ -1572,7 +1569,6 @@ extern unsigned long pci_hotplug_io_size;
 extern unsigned long pci_hotplug_mem_size;
 
 /* Architecture-specific versions may override these (weak) */
-int pcibios_add_platform_entries(struct pci_dev *dev);
 void pcibios_disable_device(struct pci_dev *dev);
 void pcibios_set_master(struct pci_dev *dev);
 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
......
@@ -142,6 +142,7 @@ typedef unsigned long blkcnt_t;
 #define pgoff_t unsigned long
 #endif
 
+/* A dma_addr_t can hold any valid DMA or bus address for the platform */
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 typedef u64 dma_addr_t;
 #else
......
@@ -1288,13 +1288,10 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 		if (p->flags & IORESOURCE_BUSY)
 			continue;
 
-		printk(KERN_WARNING "resource map sanity check conflict: "
-		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
+		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
 		       (unsigned long long)addr,
 		       (unsigned long long)(addr + size - 1),
-		       (unsigned long long)p->start,
-		       (unsigned long long)p->end,
-		       p->name);
+		       p->name, p);
 		err = -1;
 		break;
 	}
......