Commit f9bfeccd authored by Bjorn Helgaas

Merge branch 'pci/enumeration' into next

* pci/enumeration:
  PCI: Enable ECRC only if device supports it
  PCI: Add sysfs max_link_speed/width, current_link_speed/width, etc
  PCI: Test INTx masking during enumeration, not at run-time
parents 397ee434 675734ba
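For orientation, a minimal driver-side sketch (not part of this commit; the foo_* names are purely illustrative) of how the INTx masking API is consumed after this merge: the run-time probe that toggled PCI_COMMAND is gone, pci_intx_mask_supported() is now a cheap inline check of a flag set during enumeration, and the check/mask helpers are unchanged.

/*
 * Hypothetical driver sketch, assuming a device on a shared INTx line.
 * Only pci_intx_mask_supported(), pci_check_and_mask_intx() and
 * pci_intx() from the PCI core are real; everything foo_* is made up.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/*
	 * On a shared line, claim the interrupt only if this device
	 * asserted it; masking INTx here keeps the line quiet until
	 * the device has been serviced.
	 */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/* ... service the device, then re-enable INTx, e.g. pci_intx(pdev, 1) ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* No config-space write-and-read-back needed any more. */
	if (!pci_intx_mask_supported(pdev))
		return -ENODEV;

	return request_irq(pdev->irq, foo_irq_handler, IRQF_SHARED,
			   "foo", pdev);
}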
......@@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(resource);
static ssize_t max_link_speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
u32 linkcap;
int err;
const char *speed;
err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
if (err)
return -EINVAL;
switch (linkcap & PCI_EXP_LNKCAP_SLS) {
case PCI_EXP_LNKCAP_SLS_8_0GB:
speed = "8 GT/s";
break;
case PCI_EXP_LNKCAP_SLS_5_0GB:
speed = "5 GT/s";
break;
case PCI_EXP_LNKCAP_SLS_2_5GB:
speed = "2.5 GT/s";
break;
default:
speed = "Unknown speed";
}
return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(max_link_speed);
static ssize_t max_link_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
u32 linkcap;
int err;
err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
if (err)
return -EINVAL;
return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4);
}
static DEVICE_ATTR_RO(max_link_width);
static ssize_t current_link_speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
u16 linkstat;
int err;
const char *speed;
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
if (err)
return -EINVAL;
switch (linkstat & PCI_EXP_LNKSTA_CLS) {
case PCI_EXP_LNKSTA_CLS_8_0GB:
speed = "8 GT/s";
break;
case PCI_EXP_LNKSTA_CLS_5_0GB:
speed = "5 GT/s";
break;
case PCI_EXP_LNKSTA_CLS_2_5GB:
speed = "2.5 GT/s";
break;
default:
speed = "Unknown speed";
}
return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(current_link_speed);
static ssize_t current_link_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
u16 linkstat;
int err;
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
if (err)
return -EINVAL;
return sprintf(buf, "%u\n",
(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);
static ssize_t secondary_bus_number_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
u8 sec_bus;
int err;
err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
if (err)
return -EINVAL;
return sprintf(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);
static ssize_t subordinate_bus_number_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
u8 sub_bus;
int err;
err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
if (err)
return -EINVAL;
return sprintf(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
......@@ -629,12 +752,17 @@ static struct attribute *pci_dev_attrs[] = {
NULL,
};
static const struct attribute_group pci_dev_group = {
.attrs = pci_dev_attrs,
static struct attribute *pci_bridge_attrs[] = {
&dev_attr_subordinate_bus_number.attr,
&dev_attr_secondary_bus_number.attr,
NULL,
};
const struct attribute_group *pci_dev_groups[] = {
&pci_dev_group,
static struct attribute *pcie_dev_attrs[] = {
&dev_attr_current_link_speed.attr,
&dev_attr_current_link_width.attr,
&dev_attr_max_link_width.attr,
&dev_attr_max_link_speed.attr,
NULL,
};
......@@ -1557,6 +1685,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
if (pci_is_bridge(pdev))
return a->mode;
return 0;
}
static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
if (pci_is_pcie(pdev))
return a->mode;
return 0;
}
static const struct attribute_group pci_dev_group = {
.attrs = pci_dev_attrs,
};
const struct attribute_group *pci_dev_groups[] = {
&pci_dev_group,
NULL,
};
static const struct attribute_group pci_bridge_group = {
.attrs = pci_bridge_attrs,
};
const struct attribute_group *pci_bridge_groups[] = {
&pci_bridge_group,
NULL,
};
static const struct attribute_group pcie_dev_group = {
.attrs = pcie_dev_attrs,
};
const struct attribute_group *pcie_dev_groups[] = {
&pcie_dev_group,
NULL,
};
static struct attribute_group pci_dev_hp_attr_group = {
.attrs = pci_dev_hp_attrs,
.is_visible = pci_dev_hp_attrs_are_visible,
......@@ -1592,12 +1771,24 @@ static struct attribute_group pci_dev_attr_group = {
.is_visible = pci_dev_attrs_are_visible,
};
static struct attribute_group pci_bridge_attr_group = {
.attrs = pci_bridge_attrs,
.is_visible = pci_bridge_attrs_are_visible,
};
static struct attribute_group pcie_dev_attr_group = {
.attrs = pcie_dev_attrs,
.is_visible = pcie_dev_attrs_are_visible,
};
static const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
&sriov_dev_attr_group,
#endif
&pci_bridge_attr_group,
&pcie_dev_attr_group,
NULL,
};
......
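For reference, a small userspace sketch (not part of this commit) that reads the new attributes. The device address is an assumed example; which files are actually present depends on the is_visible callbacks above, i.e. whether the device is a PCIe device and/or a bridge.

/* Userspace sketch: dump the new PCI sysfs link/bus attributes. */
#include <stdio.h>

static void show_attr(const char *dev, const char *attr)
{
	char path[256], val[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/%s", dev, attr);
	f = fopen(path, "r");
	if (!f)
		return;	/* attribute not visible for this device type */
	if (fgets(val, sizeof(val), f))
		printf("%-24s %s", attr, val);
	fclose(f);
}

int main(void)
{
	const char *dev = "0000:00:1c.0";	/* hypothetical root port; pick one from lspci */

	show_attr(dev, "max_link_speed");
	show_attr(dev, "max_link_width");
	show_attr(dev, "current_link_speed");
	show_attr(dev, "current_link_width");
	show_attr(dev, "secondary_bus_number");
	show_attr(dev, "subordinate_bus_number");
	return 0;
}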
......@@ -3708,46 +3708,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
}
EXPORT_SYMBOL_GPL(pci_intx);
/**
* pci_intx_mask_supported - probe for INTx masking support
* @dev: the PCI device to operate on
*
* Check if the device dev supports INTx masking via the config space
* command word.
*/
bool pci_intx_mask_supported(struct pci_dev *dev)
{
bool mask_supported = false;
u16 orig, new;
if (dev->broken_intx_masking)
return false;
pci_cfg_access_lock(dev);
pci_read_config_word(dev, PCI_COMMAND, &orig);
pci_write_config_word(dev, PCI_COMMAND,
orig ^ PCI_COMMAND_INTX_DISABLE);
pci_read_config_word(dev, PCI_COMMAND, &new);
/*
* There's no way to protect against hardware bugs or detect them
* reliably, but as long as we know what the value should be, let's
* go ahead and check it.
*/
if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
orig, new);
} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
mask_supported = true;
pci_write_config_word(dev, PCI_COMMAND, orig);
}
pci_cfg_access_unlock(dev);
return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
struct pci_bus *bus = dev->bus;
......@@ -3798,7 +3758,7 @@ static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
* @dev: the PCI device to operate on
*
* Check if the device dev has its INTx line asserted, mask it and
* return true in that case. False is returned if not interrupt was
* return true in that case. False is returned if no interrupt was
* pending.
*/
bool pci_check_and_mask_intx(struct pci_dev *dev)
......
......@@ -1329,6 +1329,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}
/**
* pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
* @dev: PCI device
*
* Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
* at enumeration-time to avoid modifying PCI_COMMAND at run-time.
*/
static int pci_intx_mask_broken(struct pci_dev *dev)
{
u16 orig, toggle, new;
pci_read_config_word(dev, PCI_COMMAND, &orig);
toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(dev, PCI_COMMAND, toggle);
pci_read_config_word(dev, PCI_COMMAND, &new);
pci_write_config_word(dev, PCI_COMMAND, orig);
/*
* PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
* r2.3, so strictly speaking, a device is not *broken* if it's not
* writable. But we'll live with the misnomer for now.
*/
if (new != toggle)
return 1;
return 0;
}
/**
* pci_setup_device - fill in class and map information of a device
* @dev: the device structure to fill
......@@ -1399,6 +1427,8 @@ int pci_setup_device(struct pci_dev *dev)
}
}
dev->broken_intx_masking = pci_intx_mask_broken(dev);
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
......@@ -1674,6 +1704,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
/* Initialize Advanced Error Capabilities and Control Register */
pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
/* Don't enable ECRC generation or checking if unsupported */
if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
/*
......
......@@ -366,7 +366,7 @@ struct pci_dev {
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
......@@ -1003,6 +1003,15 @@ int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);
static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
{
/*
* INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
* writable and no quirk has marked the feature broken.
*/
return !pdev->broken_intx_masking;
}
static inline int pci_is_enabled(struct pci_dev *pdev)
{
return (atomic_read(&pdev->enable_cnt) > 0);
......@@ -1026,7 +1035,6 @@ int __must_check pci_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
......
......@@ -517,6 +517,7 @@
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
......