Commit 0e59e7e7 authored by Linus Torvalds

Merge branch 'next-rebase' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci

* 'next-rebase' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci:
  PCI: Clean-up MPS debug output
  pci: Clamp pcie_set_readrq() when using "performance" settings
  PCI: enable MPS "performance" setting to properly handle bridge MPS
  PCI: Workaround for Intel MPS errata
  PCI: Add support for PASID capability
  PCI: Add implementation for PRI capability
  PCI: Export ATS functions to modules
  PCI: Move ATS implementation into own file
  PCI / PM: Remove unnecessary error variable from acpi_dev_run_wake()
  PCI hotplug: acpiphp: Prevent deadlock on PCI-to-PCI bridge remove
  PCI / PM: Extend PME polling to all PCI devices
  PCI quirk: mmc: Always check for lower base frequency quirk for Ricoh 1180:e823
  PCI: Make pci_setup_bridge() non-static for use by arch code
  x86: constify PCI raw ops structures
  PCI: Add quirk for known incorrect MPSS
  PCI: Add Solarflare vendor ID and SFC4000 device IDs
parents 46b51ea2 a513a99a
@@ -99,10 +99,10 @@ struct pci_raw_ops {
 			int reg, int len, u32 val);
 };
-extern struct pci_raw_ops *raw_pci_ops;
-extern struct pci_raw_ops *raw_pci_ext_ops;
+extern const struct pci_raw_ops *raw_pci_ops;
+extern const struct pci_raw_ops *raw_pci_ext_ops;
-extern struct pci_raw_ops pci_direct_conf1;
+extern const struct pci_raw_ops pci_direct_conf1;
 extern bool port_cf9_safe;
 /* arch_initcall level */
...
@@ -304,7 +304,7 @@ static int ce4100_conf_write(unsigned int seg, unsigned int bus,
 	return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
 }
-struct pci_raw_ops ce4100_pci_conf = {
+static const struct pci_raw_ops ce4100_pci_conf = {
 	.read = ce4100_conf_read,
 	.write = ce4100_conf_write,
 };
...
@@ -33,8 +33,8 @@ int noioapicreroute = 1;
 int pcibios_last_bus = -1;
 unsigned long pirq_table_addr;
 struct pci_bus *pci_root_bus;
-struct pci_raw_ops *raw_pci_ops;
-struct pci_raw_ops *raw_pci_ext_ops;
+const struct pci_raw_ops *__read_mostly raw_pci_ops;
+const struct pci_raw_ops *__read_mostly raw_pci_ext_ops;
 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
 						int reg, int len, u32 *val)
...
@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
 #undef PCI_CONF1_ADDRESS
-struct pci_raw_ops pci_direct_conf1 = {
+const struct pci_raw_ops pci_direct_conf1 = {
 	.read = pci_conf1_read,
 	.write = pci_conf1_write,
 };
@@ -175,7 +175,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
 #undef PCI_CONF2_ADDRESS
-struct pci_raw_ops pci_direct_conf2 = {
+static const struct pci_raw_ops pci_direct_conf2 = {
 	.read = pci_conf2_read,
 	.write = pci_conf2_write,
 };
@@ -191,7 +191,7 @@ struct pci_raw_ops pci_direct_conf2 = {
  * This should be close to trivial, but it isn't, because there are buggy
  * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
  */
-static int __init pci_sanity_check(struct pci_raw_ops *o)
+static int __init pci_sanity_check(const struct pci_raw_ops *o)
 {
 	u32 x = 0;
 	int year, devfn;
...
@@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
 	return 0;
 }
-static struct pci_raw_ops pci_mmcfg = {
+static const struct pci_raw_ops pci_mmcfg = {
 	.read = pci_mmcfg_read,
 	.write = pci_mmcfg_write,
 };
...
@@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
 	return 0;
 }
-static struct pci_raw_ops pci_mmcfg = {
+static const struct pci_raw_ops pci_mmcfg = {
 	.read = pci_mmcfg_read,
 	.write = pci_mmcfg_write,
 };
...
@@ -110,7 +110,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
 #undef PCI_CONF1_MQ_ADDRESS
-static struct pci_raw_ops pci_direct_conf1_mq = {
+static const struct pci_raw_ops pci_direct_conf1_mq = {
 	.read	= pci_conf1_mq_read,
 	.write	= pci_conf1_mq_write
 };
...
@@ -301,7 +301,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
 	return 0;
 }
-static struct pci_raw_ops pci_olpc_conf = {
+static const struct pci_raw_ops pci_olpc_conf = {
 	.read =	pci_olpc_read,
 	.write = pci_olpc_write,
 };
...
@@ -303,7 +303,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
  * Function table for BIOS32 access
  */
-static struct pci_raw_ops pci_bios_access = {
+static const struct pci_raw_ops pci_bios_access = {
 	.read = pci_bios_read,
 	.write = pci_bios_write
 };
@@ -312,7 +312,7 @@ static struct pci_raw_ops pci_bios_access = {
  * Try to find PCI BIOS.
  */
-static struct pci_raw_ops * __devinit pci_find_bios(void)
+static const struct pci_raw_ops * __devinit pci_find_bios(void)
 {
 	union bios32 *check;
 	unsigned char sum;
...
@@ -80,7 +80,8 @@ static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
 static struct workqueue_struct *kacpi_notify_wq;
-static struct workqueue_struct *kacpi_hotplug_wq;
+struct workqueue_struct *kacpi_hotplug_wq;
+EXPORT_SYMBOL(kacpi_hotplug_wq);
 struct acpi_res_list {
 	resource_size_t start;
...
@@ -2229,13 +2229,15 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 /* PCI device ID table */
 static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
-	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
 	 .driver_data = (unsigned long) &falcon_a1_nic_type},
-	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
 	 .driver_data = (unsigned long) &falcon_b0_nic_type},
-	{PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
 	 .driver_data = (unsigned long) &siena_a0_nic_type},
-	{PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
 	 .driver_data = (unsigned long) &siena_a0_nic_type},
 	{0}			/* end of list */
 };
...
@@ -15,10 +15,6 @@
 #include "filter.h"
 /* PCI IDs */
-#define EFX_VENDID_SFC		0x1924
-#define FALCON_A_P_DEVID	0x0703
-#define FALCON_A_S_DEVID	0x6703
-#define FALCON_B_P_DEVID	0x0710
 #define BETHPAGE_A_P_DEVID	0x0803
 #define SIENA_A_P_DEVID		0x0813
...
@@ -1426,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
 	}
 	dev = pci_dev_get(efx->pci_dev);
-	while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
+	while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
+				     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
 				     dev))) {
 		if (dev->bus == efx->pci_dev->bus &&
 		    dev->devfn == efx->pci_dev->devfn + 1) {
...
@@ -764,7 +764,8 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
 	if (board->type) {
 		netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
-			   (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
+			   (efx->pci_dev->subsystem_vendor ==
+			    PCI_VENDOR_ID_SOLARFLARE)
 			   ? board->type->ref_model : board->type->gen_type,
 			   'A' + board->major, board->minor);
 		return 0;
...
@@ -71,9 +71,13 @@ config HT_IRQ
 	  If unsure say Y.
+config PCI_ATS
+	bool
 config PCI_IOV
 	bool "PCI IOV support"
 	depends on PCI
+	select PCI_ATS
 	help
 	  I/O Virtualization is a PCI feature supported by some devices
 	  which allows them to create virtual devices which share their
@@ -81,6 +85,28 @@ config PCI_IOV
 	  If unsure, say N.
+config PCI_PRI
+	bool "PCI PRI support"
+	select PCI_ATS
+	help
+	  PRI is the PCI Page Request Interface. It allows PCI devices that are
+	  behind an IOMMU to recover from page faults.
+	  If unsure, say N.
+config PCI_PASID
+	bool "PCI PASID support"
+	depends on PCI
+	select PCI_ATS
+	help
+	  Process Address Space Identifiers (PASIDs) can be used by PCI devices
+	  to access more than one IO address space at the same time. To make
+	  use of this feature an IOMMU is required which also supports PASIDs.
+	  Select this option if you have such an IOMMU and want to compile the
+	  driver for it into your kernel.
+	  If unsure, say N.
 config PCI_IOAPIC
 	bool
 	depends on PCI
...
@@ -29,6 +29,7 @@ obj-$(CONFIG_PCI_MSI) += msi.o
 # Build the Hypertransport interrupt support
 obj-$(CONFIG_HT_IRQ) += htirq.o
+obj-$(CONFIG_PCI_ATS) += ats.o
 obj-$(CONFIG_PCI_IOV) += iov.o
 #
...
@@ -48,6 +48,7 @@
 #include <linux/pci-acpi.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
 #include "../pci.h"
 #include "acpiphp.h"
@@ -1149,15 +1150,35 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 	return AE_OK ;
 }
-/**
- * handle_hotplug_event_bridge - handle ACPI event on bridges
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @context: pointer to acpiphp_bridge structure
- *
- * Handles ACPI event notification on {host,p2p} bridges.
- */
-static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context)
+struct acpiphp_hp_work {
+	struct work_struct work;
+	acpi_handle handle;
+	u32 type;
+	void *context;
+};
+static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type,
+				  void *context,
+				  void (*func)(struct work_struct *work))
+{
+	struct acpiphp_hp_work *hp_work;
+	int ret;
+	hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
+	if (!hp_work)
+		return;
+	hp_work->handle = handle;
+	hp_work->type = type;
+	hp_work->context = context;
+	INIT_WORK(&hp_work->work, func);
+	ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
+	if (!ret)
+		kfree(hp_work);
+}
+static void _handle_hotplug_event_bridge(struct work_struct *work)
 {
 	struct acpiphp_bridge *bridge;
 	char objname[64];
@@ -1165,11 +1186,18 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
 		.pointer = objname };
 	struct acpi_device *device;
 	int num_sub_bridges = 0;
+	struct acpiphp_hp_work *hp_work;
+	acpi_handle handle;
+	u32 type;
+	hp_work = container_of(work, struct acpiphp_hp_work, work);
+	handle = hp_work->handle;
+	type = hp_work->type;
 	if (acpi_bus_get_device(handle, &device)) {
 		/* This bridge must have just been physically inserted */
 		handle_bridge_insertion(handle, type);
-		return;
+		goto out;
 	}
 	bridge = acpiphp_handle_to_bridge(handle);
@@ -1180,7 +1208,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
 	if (!bridge && !num_sub_bridges) {
 		err("cannot get bridge info\n");
-		return;
+		goto out;
 	}
 	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1241,22 +1269,49 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
 		warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
 		break;
 	}
+out:
+	kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
 }
 /**
- * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
+ * handle_hotplug_event_bridge - handle ACPI event on bridges
  * @handle: Notify()'ed acpi_handle
  * @type: Notify code
- * @context: pointer to acpiphp_func structure
+ * @context: pointer to acpiphp_bridge structure
  *
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context)
+ * Handles ACPI event notification on {host,p2p} bridges.
+ */
+static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
+					void *context)
+{
+	/*
+	 * Currently the code adds all hotplug events to the kacpid_wq
+	 * queue when it should add hotplug events to the kacpi_hotplug_wq.
+	 * The proper way to fix this is to reorganize the code so that
+	 * drivers (dock, etc.) do not call acpi_os_execute(), etc.
+	 * For now just re-add this work to the kacpi_hotplug_wq so we
+	 * don't deadlock on hotplug actions.
+	 */
+	alloc_acpiphp_hp_work(handle, type, context,
+			      _handle_hotplug_event_bridge);
+}
+static void _handle_hotplug_event_func(struct work_struct *work)
 {
 	struct acpiphp_func *func;
 	char objname[64];
 	struct acpi_buffer buffer = { .length = sizeof(objname),
 				      .pointer = objname };
+	struct acpiphp_hp_work *hp_work;
+	acpi_handle handle;
+	u32 type;
+	void *context;
+	hp_work = container_of(work, struct acpiphp_hp_work, work);
+	handle = hp_work->handle;
+	type = hp_work->type;
+	context = hp_work->context;
 	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1291,8 +1346,32 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex
 		warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
 		break;
 	}
+	kfree(hp_work); /* allocated in handle_hotplug_event_func */
 }
+/**
+ * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
+ * @handle: Notify()'ed acpi_handle
+ * @type: Notify code
+ * @context: pointer to acpiphp_func structure
+ *
+ * Handles ACPI event notification on slots.
+ */
+static void handle_hotplug_event_func(acpi_handle handle, u32 type,
+				      void *context)
+{
+	/*
+	 * Currently the code adds all hotplug events to the kacpid_wq
+	 * queue when it should add hotplug events to the kacpi_hotplug_wq.
+	 * The proper way to fix this is to reorganize the code so that
+	 * drivers (dock, etc.) do not call acpi_os_execute(), etc.
+	 * For now just re-add this work to the kacpi_hotplug_wq so we
+	 * don't deadlock on hotplug actions.
+	 */
+	alloc_acpiphp_hp_work(handle, type, context,
+			      _handle_hotplug_event_func);
+}
 static acpi_status
 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
...
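The acpiphp change above follows a common deferral pattern: the ACPI notify callback only captures its arguments and queues work on kacpi_hotplug_wq, and the real handling runs later in process context. A minimal sketch of that pattern, with illustrative my_* names that are not part of the patch:

struct my_hp_work {
	struct work_struct work;
	acpi_handle handle;
	u32 type;
	void *context;
};

/* Runs later on the hotplug workqueue; frees the work item it was given. */
static void my_do_work(struct work_struct *work)
{
	struct my_hp_work *hp = container_of(work, struct my_hp_work, work);

	/* ... handle the hotplug event using hp->handle and hp->type ... */
	kfree(hp);
}

/* ACPI notify callback: capture the arguments, defer, and return quickly. */
static void my_notify(acpi_handle handle, u32 type, void *context)
{
	struct my_hp_work *hp = kmalloc(sizeof(*hp), GFP_KERNEL);

	if (!hp)
		return;
	hp->handle = handle;
	hp->type = type;
	hp->context = context;
	INIT_WORK(&hp->work, my_do_work);
	if (!queue_work(kacpi_hotplug_wq, &hp->work))
		kfree(hp);	/* work was already pending; drop this item */
}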
@@ -722,145 +722,3 @@ int pci_num_vf(struct pci_dev *dev)
 	return dev->sriov->nr_virtfn;
 }
 EXPORT_SYMBOL_GPL(pci_num_vf);
-static int ats_alloc_one(struct pci_dev *dev, int ps)
-{
-	int pos;
-	u16 cap;
-	struct pci_ats *ats;
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
-	if (!pos)
-		return -ENODEV;
-	ats = kzalloc(sizeof(*ats), GFP_KERNEL);
-	if (!ats)
-		return -ENOMEM;
-	ats->pos = pos;
-	ats->stu = ps;
-	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-	ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
-					    PCI_ATS_MAX_QDEP;
-	dev->ats = ats;
-	return 0;
-}
-static void ats_free_one(struct pci_dev *dev)
-{
-	kfree(dev->ats);
-	dev->ats = NULL;
-}
-/**
- * pci_enable_ats - enable the ATS capability
- * @dev: the PCI device
- * @ps: the IOMMU page shift
- *
- * Returns 0 on success, or negative on failure.
- */
-int pci_enable_ats(struct pci_dev *dev, int ps)
-{
-	int rc;
-	u16 ctrl;
-	BUG_ON(dev->ats && dev->ats->is_enabled);
-	if (ps < PCI_ATS_MIN_STU)
-		return -EINVAL;
-	if (dev->is_physfn || dev->is_virtfn) {
-		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-		mutex_lock(&pdev->sriov->lock);
-		if (pdev->ats)
-			rc = pdev->ats->stu == ps ? 0 : -EINVAL;
-		else
-			rc = ats_alloc_one(pdev, ps);
-		if (!rc)
-			pdev->ats->ref_cnt++;
-		mutex_unlock(&pdev->sriov->lock);
-		if (rc)
-			return rc;
-	}
-	if (!dev->is_physfn) {
-		rc = ats_alloc_one(dev, ps);
-		if (rc)
-			return rc;
-	}
-	ctrl = PCI_ATS_CTRL_ENABLE;
-	if (!dev->is_virtfn)
-		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
-	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-	dev->ats->is_enabled = 1;
-	return 0;
-}
-/**
- * pci_disable_ats - disable the ATS capability
- * @dev: the PCI device
- */
-void pci_disable_ats(struct pci_dev *dev)
-{
-	u16 ctrl;
-	BUG_ON(!dev->ats || !dev->ats->is_enabled);
-	pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
-	ctrl &= ~PCI_ATS_CTRL_ENABLE;
-	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-	dev->ats->is_enabled = 0;
-	if (dev->is_physfn || dev->is_virtfn) {
-		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-		mutex_lock(&pdev->sriov->lock);
-		pdev->ats->ref_cnt--;
-		if (!pdev->ats->ref_cnt)
-			ats_free_one(pdev);
-		mutex_unlock(&pdev->sriov->lock);
-	}
-	if (!dev->is_physfn)
-		ats_free_one(dev);
-}
-/**
- * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
- * @dev: the PCI device
- *
- * Returns the queue depth on success, or negative on failure.
- *
- * The ATS spec uses 0 in the Invalidate Queue Depth field to
- * indicate that the function can accept 32 Invalidate Request.
- * But here we use the `real' values (i.e. 1~32) for the Queue
- * Depth; and 0 indicates the function shares the Queue with
- * other functions (doesn't exclusively own a Queue).
- */
-int pci_ats_queue_depth(struct pci_dev *dev)
-{
-	int pos;
-	u16 cap;
-	if (dev->is_virtfn)
-		return 0;
-	if (dev->ats)
-		return dev->ats->qdep;
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
-	if (!pos)
-		return -ENODEV;
-	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
-				       PCI_ATS_MAX_QDEP;
-}
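For context, the ATS interface being moved out of iov.c (and now built from ats.o and exported to modules) is typically driven by an IOMMU driver. A hedged sketch of such a caller; example_attach_ats and the page-shift value 12 (4 KiB pages) are illustrative, not taken from this series:

static int example_attach_ats(struct pci_dev *pdev)
{
	int ret, qdep;

	ret = pci_enable_ats(pdev, 12);	/* IOMMU page shift */
	if (ret)
		return ret;		/* e.g. no ATS capability */

	qdep = pci_ats_queue_depth(pdev);
	if (qdep < 0) {
		pci_disable_ats(pdev);
		return qdep;
	}

	/* ... size the IOMMU's invalidation queue for qdep requests ... */
	return 0;
}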
@@ -46,6 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
 	struct pci_dev *pci_dev = context;
 	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+		if (pci_dev->pme_poll)
+			pci_dev->pme_poll = false;
 		pci_wakeup_event(pci_dev);
 		pci_check_pme_status(pci_dev);
 		pm_runtime_resume(&pci_dev->dev);
@@ -282,7 +285,6 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
 {
 	struct acpi_device *dev;
 	acpi_handle handle;
-	int error = -ENODEV;
 	if (!device_run_wake(phys_dev))
 		return -EINVAL;
@@ -302,7 +304,7 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
 		acpi_disable_wakeup_device_power(dev);
 	}
-	return error;
+	return 0;
 }
 static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
...
@@ -1407,13 +1407,16 @@ bool pci_check_pme_status(struct pci_dev *dev)
 /**
  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
  * @dev: Device to handle.
- * @ign: Ignored.
+ * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
  *
  * Check if @dev has generated PME and queue a resume request for it in that
  * case.
  */
-static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
 {
+	if (pme_poll_reset && dev->pme_poll)
+		dev->pme_poll = false;
 	if (pci_check_pme_status(dev)) {
 		pci_wakeup_event(dev);
 		pm_request_resume(&dev->dev);
@@ -1428,7 +1431,7 @@ static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
 void pci_pme_wakeup_bus(struct pci_bus *bus)
 {
 	if (bus)
-		pci_walk_bus(bus, pci_pme_wakeup, NULL);
+		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
 }
 /**
@@ -1446,30 +1449,25 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
 static void pci_pme_list_scan(struct work_struct *work)
 {
-	struct pci_pme_device *pme_dev;
+	struct pci_pme_device *pme_dev, *n;
 	mutex_lock(&pci_pme_list_mutex);
 	if (!list_empty(&pci_pme_list)) {
-		list_for_each_entry(pme_dev, &pci_pme_list, list)
-			pci_pme_wakeup(pme_dev->dev, NULL);
-		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
+			if (pme_dev->dev->pme_poll) {
+				pci_pme_wakeup(pme_dev->dev, NULL);
+			} else {
+				list_del(&pme_dev->list);
+				kfree(pme_dev);
+			}
+		}
+		if (!list_empty(&pci_pme_list))
+			schedule_delayed_work(&pci_pme_work,
+					      msecs_to_jiffies(PME_TIMEOUT));
 	}
 	mutex_unlock(&pci_pme_list_mutex);
 }
-/**
- * pci_external_pme - is a device an external PCI PME source?
- * @dev: PCI device to check
- *
- */
-static bool pci_external_pme(struct pci_dev *dev)
-{
-	if (pci_is_pcie(dev) || dev->bus->number == 0)
-		return false;
-	return true;
-}
 /**
  * pci_pme_active - enable or disable PCI device's PME# function
  * @dev: PCI device to handle.
@@ -1503,7 +1501,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 	   hit, and the power savings from the devices will still be a
 	   win. */
-	if (pci_external_pme(dev)) {
+	if (dev->pme_poll) {
 		struct pci_pme_device *pme_dev;
 		if (enable) {
 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
@@ -1821,6 +1819,7 @@ void pci_pm_init(struct pci_dev *dev)
 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
+		dev->pme_poll = true;
 		/*
 		 * Make device's PM flags reflect the wake-up capability, but
 		 * let the user space enable it to wake up the system as needed.
@@ -3203,8 +3202,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
 		goto out;
-	v = (ffs(rq) - 8) << 12;
 	cap = pci_pcie_cap(dev);
 	if (!cap)
 		goto out;
@@ -3212,6 +3209,22 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
 	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
 	if (err)
 		goto out;
+	/*
+	 * If using the "performance" PCIe config, we clamp the
+	 * read rq size to the max packet size to prevent the
+	 * host bridge generating requests larger than we can
+	 * cope with
+	 */
+	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+		int mps = pcie_get_mps(dev);
+		if (mps < 0)
+			return mps;
+		if (mps < rq)
+			rq = mps;
+	}
+	v = (ffs(rq) - 8) << 12;
 	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
 		ctl &= ~PCI_EXP_DEVCTL_READRQ;
...
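As a worked example of the arithmetic in pcie_set_readrq() above: the Max_Read_Request_Size field lives in bits 14:12 of PCI_EXP_DEVCTL and encodes log2(size) - 7, so rq = 512 gives ffs(512) = 10 and v = (10 - 8) << 12 = 0x2000. The sketch below (example_readrq_encoding is a hypothetical helper, not part of the patch) shows the clamp-then-encode ordering the change establishes:

static u16 example_readrq_encoding(int rq, int mps, bool performance)
{
	/* Under the "performance" bus config, never request more than MPS. */
	if (performance && mps < rq)
		rq = mps;

	/* Encode into the DEVCTL Max_Read_Request_Size field (bits 14:12). */
	return (ffs(rq) - 8) << 12;
}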
@@ -84,6 +84,9 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		/* Skip PCIe devices in case we started from a root port. */
 		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
+			if (dev->pme_poll)
+				dev->pme_poll = false;
 			pci_wakeup_event(dev);
 			pm_request_resume(&dev->dev);
 			ret = true;
@@ -142,6 +145,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
 	/* First, check if the PME is from the root port itself. */
 	if (port->devfn == devfn && port->bus->number == busnr) {
+		if (port->pme_poll)
+			port->pme_poll = false;
 		if (pci_check_pme_status(port)) {
 			pm_request_resume(&port->dev);
 			found = true;
@@ -187,6 +193,9 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
 		/* The device is there, but we have to check its PME status. */
 		found = pci_check_pme_status(dev);
 		if (found) {
+			if (dev->pme_poll)
+				dev->pme_poll = false;
 			pci_wakeup_event(dev);
 			pm_request_resume(&dev->dev);
 		}
...
@@ -1363,31 +1363,25 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
 static void pcie_write_mps(struct pci_dev *dev, int mps)
 {
-	int rc, dev_mpss;
-	dev_mpss = 128 << dev->pcie_mpss;
+	int rc;
 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
-		if (dev->bus->self) {
-			dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
-				128 << dev->bus->self->pcie_mpss);
-			/* For "MPS Force Max", the assumption is made that
+		mps = 128 << dev->pcie_mpss;
+		if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self)
+			/* For "Performance", the assumption is made that
 			 * downstream communication will never be larger than
 			 * the MRRS. So, the MPS only needs to be configured
 			 * for the upstream communication. This being the case,
 			 * walk from the top down and set the MPS of the child
 			 * to that of the parent bus.
+			 *
+			 * Configure the device MPS with the smaller of the
+			 * device MPSS or the bridge MPS (which is assumed to be
+			 * properly configured at this point to the largest
+			 * allowable MPS based on its parent bus).
 			 */
-			mps = 128 << dev->bus->self->pcie_mpss;
-			if (mps > dev_mpss)
-				dev_warn(&dev->dev, "MPS configured higher than"
-					 " maximum supported by the device. If"
-					 " a bus issue occurs, try running with"
-					 " pci=pcie_bus_safe.\n");
-		}
-		dev->pcie_mpss = ffs(mps) - 8;
+			mps = min(mps, pcie_get_mps(dev->bus->self));
 	}
 	rc = pcie_set_mps(dev, mps);
@@ -1395,25 +1389,22 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
 		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
 }
-static void pcie_write_mrrs(struct pci_dev *dev, int mps)
+static void pcie_write_mrrs(struct pci_dev *dev)
 {
-	int rc, mrrs, dev_mpss;
+	int rc, mrrs;
 	/* In the "safe" case, do not configure the MRRS. There appear to be
 	 * issues with setting MRRS to 0 on a number of devices.
 	 */
 	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
 		return;
-	dev_mpss = 128 << dev->pcie_mpss;
 	/* For Max performance, the MRRS must be set to the largest supported
 	 * value. However, it cannot be configured larger than the MPS the
-	 * device or the bus can support. This assumes that the largest MRRS
-	 * available on the device cannot be smaller than the device MPSS.
+	 * device or the bus can support. This should already be properly
+	 * configured by a prior call to pcie_write_mps.
 	 */
-	mrrs = min(mps, dev_mpss);
+	mrrs = pcie_get_mps(dev);
 	/* MRRS is a R/W register. Invalid values can be written, but a
 	 * subsequent read will verify if the value is acceptable or not.
@@ -1421,38 +1412,41 @@ static void pcie_write_mrrs(struct pci_dev *dev, int mps)
 	 * shrink the value until it is acceptable to the HW.
 	 */
 	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
-		dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
-			 " to %d. If any issues are encountered, please try "
-			 "running with pci=pcie_bus_safe\n", mrrs);
 		rc = pcie_set_readrq(dev, mrrs);
-		if (rc)
-			dev_err(&dev->dev,
-				"Failed attempting to set the MRRS\n");
+		if (!rc)
+			break;
+		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
 		mrrs /= 2;
 	}
+	if (mrrs < 128)
+		dev_err(&dev->dev, "MRRS was unable to be configured with a "
+			"safe value. If problems are experienced, try running "
+			"with pci=pcie_bus_safe.\n");
 }
 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
 {
-	int mps = 128 << *(u8 *)data;
+	int mps, orig_mps;
 	if (!pci_is_pcie(dev))
 		return 0;
-	dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
-		pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+	mps = 128 << *(u8 *)data;
+	orig_mps = pcie_get_mps(dev);
 	pcie_write_mps(dev, mps);
-	pcie_write_mrrs(dev, mps);
-	dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
-		pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+	pcie_write_mrrs(dev);
+	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
+		 orig_mps, pcie_get_readrq(dev));
 	return 0;
 }
-/* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down,
+/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
  * parents then children fashion. If this changes, then this code will not
  * work as designed.
  */
...
@@ -2745,20 +2745,6 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 	/* disable must be done via function #0 */
 	if (PCI_FUNC(dev->devfn))
 		return;
-	pci_read_config_byte(dev, 0xCB, &disable);
-	if (disable & 0x02)
-		return;
-	pci_read_config_byte(dev, 0xCA, &write_enable);
-	pci_write_config_byte(dev, 0xCA, 0x57);
-	pci_write_config_byte(dev, 0xCB, disable | 0x02);
-	pci_write_config_byte(dev, 0xCA, write_enable);
-	dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
-	dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
 	/*
 	 * RICOH 0xe823 SD/MMC card reader fails to recognize
 	 * certain types of SD/MMC cards. Lowering the SD base
@@ -2781,6 +2767,20 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
 		dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
 	}
+	pci_read_config_byte(dev, 0xCB, &disable);
+	if (disable & 0x02)
+		return;
+	pci_read_config_byte(dev, 0xCA, &write_enable);
+	pci_write_config_byte(dev, 0xCA, 0x57);
+	pci_write_config_byte(dev, 0xCB, disable | 0x02);
+	pci_write_config_byte(dev, 0xCA, write_enable);
+	dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+	dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
@@ -2822,6 +2822,89 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
+/* Some PCIe devices do not work reliably with the claimed maximum
+ * payload size supported.
+ */
+static void __devinit fixup_mpss_256(struct pci_dev *dev)
+{
+	dev->pcie_mpss = 1; /* 256 bytes */
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+/* Intel 5000 and 5100 Memory controllers have an errata with read completion
+ * coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
+ * Since there is no way of knowing what the PCIE MPS on each fabric will be
+ * until all of the devices are discovered and buses walked, read completion
+ * coalescing must be disabled. Unfortunately, it cannot be re-enabled because
+ * it is possible to hotplug a device with MPS of 256B.
+ */
+static void __devinit quirk_intel_mc_errata(struct pci_dev *dev)
+{
+	int err;
+	u16 rcc;
+	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+		return;
+	/* Intel errata specifies bits to change but does not say what they are.
+	 * Keeping them magical until such time as the registers and values can
+	 * be explained.
+	 */
+	err = pci_read_config_word(dev, 0x48, &rcc);
+	if (err) {
+		dev_err(&dev->dev, "Error attempting to read the read "
+			"completion coalescing register.\n");
+		return;
+	}
+	if (!(rcc & (1 << 10)))
+		return;
+	rcc &= ~(1 << 10);
+	err = pci_write_config_word(dev, 0x48, rcc);
+	if (err) {
+		dev_err(&dev->dev, "Error attempting to write the read "
+			"completion coalescing register.\n");
+		return;
+	}
+	pr_info_once("Read completion coalescing disabled due to hardware "
+		     "errata relating to 256B MPS.\n");
+}
+/* Intel 5000 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
+/* Intel 5100 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 			  struct pci_fixup *end)
 {
...
@@ -426,7 +426,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
-static void pci_setup_bridge(struct pci_bus *bus)
+void pci_setup_bridge(struct pci_bus *bus)
 {
 	unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
 				IORESOURCE_PREFETCH;
...
@@ -189,6 +189,8 @@ void acpi_os_fixed_event_count(u32 fixed_event_number);
 /*
  * Threads and Scheduling
  */
+extern struct workqueue_struct *kacpi_hotplug_wq;
 acpi_thread_id acpi_os_get_thread_id(void);
 acpi_status
...
 #ifndef LINUX_PCI_ATS_H
 #define LINUX_PCI_ATS_H
+#include <linux/pci.h>
 /* Address Translation Service */
 struct pci_ats {
 	int pos;	/* capability position */
@@ -15,6 +17,7 @@ struct pci_ats {
 extern int pci_enable_ats(struct pci_dev *dev, int ps);
 extern void pci_disable_ats(struct pci_dev *dev);
 extern int pci_ats_queue_depth(struct pci_dev *dev);
 /**
  * pci_ats_enabled - query the ATS status
  * @dev: the PCI device
@@ -49,4 +52,76 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 #endif /* CONFIG_PCI_IOV */
+#ifdef CONFIG_PCI_PRI
+extern int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
+extern void pci_disable_pri(struct pci_dev *pdev);
+extern bool pci_pri_enabled(struct pci_dev *pdev);
+extern int pci_reset_pri(struct pci_dev *pdev);
+extern bool pci_pri_stopped(struct pci_dev *pdev);
+extern int pci_pri_status(struct pci_dev *pdev);
+#else /* CONFIG_PCI_PRI */
+static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
+{
+	return -ENODEV;
+}
+static inline void pci_disable_pri(struct pci_dev *pdev)
+{
+}
+static inline bool pci_pri_enabled(struct pci_dev *pdev)
+{
+	return false;
+}
+static inline int pci_reset_pri(struct pci_dev *pdev)
+{
+	return -ENODEV;
+}
+static inline bool pci_pri_stopped(struct pci_dev *pdev)
+{
+	return true;
+}
+static inline int pci_pri_status(struct pci_dev *pdev)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_PCI_PRI */
+#ifdef CONFIG_PCI_PASID
+extern int pci_enable_pasid(struct pci_dev *pdev, int features);
+extern void pci_disable_pasid(struct pci_dev *pdev);
+extern int pci_pasid_features(struct pci_dev *pdev);
+extern int pci_max_pasids(struct pci_dev *pdev);
+#else /* CONFIG_PCI_PASID */
+static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+{
+	return -EINVAL;
+}
+static inline void pci_disable_pasid(struct pci_dev *pdev)
+{
+}
+static inline int pci_pasid_features(struct pci_dev *pdev)
+{
+	return -EINVAL;
+}
+static inline int pci_max_pasids(struct pci_dev *pdev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_PCI_PASID */
 #endif /* LINUX_PCI_ATS_H*/
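A rough sketch of how an IOMMU driver might consume the PRI/PASID interface declared above; the request allotment (32) and the empty PASID feature mask are illustrative values, not part of this series:

static int example_enable_iommu_features(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_pri(pdev, 32);		/* allow up to 32 outstanding page requests */
	if (ret)
		return ret;

	ret = pci_enable_pasid(pdev, 0);	/* no PCI_PASID_EXEC / PCI_PASID_PRIV */
	if (ret) {
		pci_disable_pri(pdev);
		return ret;
	}

	return 0;
}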
@@ -275,6 +275,7 @@ struct pci_dev {
 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
 					   can be generated */
 	unsigned int	pme_interrupt:1;
+	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
 	unsigned int	no_d1d2:1;	/* Only allow D0 and D3 */
@@ -957,6 +958,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
 int pci_cfg_space_size_ext(struct pci_dev *dev);
 int pci_cfg_space_size(struct pci_dev *dev);
 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
+void pci_setup_bridge(struct pci_bus *bus);
 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
...
@@ -2302,6 +2302,11 @@
 #define PCI_DEVICE_ID_RENESAS_SH7785	0x0007
 #define PCI_DEVICE_ID_RENESAS_SH7786	0x0010
+#define PCI_VENDOR_ID_SOLARFLARE	0x1924
+#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0	0x0703
+#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1	0x6703
+#define PCI_DEVICE_ID_SOLARFLARE_SFC4000B	0x0710
 #define PCI_VENDOR_ID_TDI		0x192E
 #define PCI_DEVICE_ID_TDI_EHCI		0x0101
...
@@ -663,6 +663,26 @@
 #define PCI_ATS_CTRL_STU(x)	((x) & 0x1f)	/* Smallest Translation Unit */
 #define PCI_ATS_MIN_STU		12	/* shift of minimum STU block */
+/* Page Request Interface */
+#define PCI_PRI_CAP		0x13	/* PRI capability ID */
+#define PCI_PRI_CONTROL_OFF	0x04	/* Offset of control register */
+#define PCI_PRI_STATUS_OFF	0x06	/* Offset of status register */
+#define PCI_PRI_ENABLE		0x0001	/* Enable mask */
+#define PCI_PRI_RESET		0x0002	/* Reset bit mask */
+#define PCI_PRI_STATUS_RF	0x0001	/* Request Failure */
+#define PCI_PRI_STATUS_UPRGI	0x0002	/* Unexpected PRG index */
+#define PCI_PRI_STATUS_STOPPED	0x0100	/* PRI Stopped */
+#define PCI_PRI_MAX_REQ_OFF	0x08	/* Cap offset for max reqs supported */
+#define PCI_PRI_ALLOC_REQ_OFF	0x0c	/* Cap offset for max reqs allowed */
+/* PASID capability */
+#define PCI_PASID_CAP		0x1b	/* PASID capability ID */
+#define PCI_PASID_CAP_OFF	0x04	/* PASID feature register */
+#define PCI_PASID_CONTROL_OFF	0x06	/* PASID control register */
+#define PCI_PASID_ENABLE	0x01	/* Enable/Supported bit */
+#define PCI_PASID_EXEC		0x02	/* Exec permissions Enable/Supported */
+#define PCI_PASID_PRIV		0x04	/* Priviledge Mode Enable/Support */
 /* Single Root I/O Virtualization */
 #define PCI_SRIOV_CAP		0x04	/* SR-IOV Capabilities */
 #define PCI_SRIOV_CAP_VFM	0x01	/* VF Migration Capable */
...