Commit e0f6fa0d authored by Dan Williams

Merge branch 'for-6.2/cxl-aer' into for-6.2/cxl

Pick up CXL AER handling and correctable error extensions. Resolve
conflicts with cxl_pmem_wq reworks and RCH support.
parents 95dddcb5 6155ccc9
@@ -83,6 +83,7 @@ This structure has the form::
int (*mmio_enabled)(struct pci_dev *dev);
int (*slot_reset)(struct pci_dev *dev);
void (*resume)(struct pci_dev *dev);
void (*cor_error_detected)(struct pci_dev *dev);
};
The possible channel states are::
@@ -422,5 +423,11 @@ That is, the recovery API only requires that:
- drivers/net/cxgb3
- drivers/net/s2io.c
The cor_error_detected() callback is invoked in handle_error_source() when
the error severity is "correctable". The callback is optional and allows
additional logging to be done if desired. See example:
- drivers/cxl/pci.c
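A minimal sketch of a driver opting in, with hypothetical my_* names
standing in for driver specifics::

	static void my_cor_error_detected(struct pci_dev *pdev)
	{
		/*
		 * The AER core has already logged and cleared the error
		 * status by the time this runs; this hook exists purely
		 * for extra device-specific reporting.
		 */
		pci_info(pdev, "correctable error observed\n");
	}

	static const struct pci_error_handlers my_err_handlers = {
		.cor_error_detected = my_cor_error_detected,
	};

	static struct pci_driver my_driver = {
		.name = "my_driver",
		.err_handler = &my_err_handlers,
		/* .id_table, .probe, etc. as usual */
	};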
The End
-------
@@ -82,18 +82,23 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(14, 12);
}
static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
void __iomem *crb)
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
struct cxl_component_regs *regs)
{
struct cxl_component_reg_map map;
struct cxl_register_map map = {
.resource = port->component_reg_phys,
.base = crb,
.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
};
cxl_probe_component_regs(&port->dev, crb, &map);
if (!map.hdm_decoder.valid) {
cxl_probe_component_regs(&port->dev, crb, &map.component_map);
if (!map.component_map.hdm_decoder.valid) {
dev_err(&port->dev, "HDM decoder registers invalid\n");
return IOMEM_ERR_PTR(-ENXIO);
return -ENXIO;
}
return crb + map.hdm_decoder.offset;
return cxl_map_component_regs(&port->dev, regs, &map,
BIT(CXL_CM_CAP_CAP_ID_HDM));
}
/**
@@ -103,25 +108,25 @@ static void __iomem *map_hdm_decoder_regs(struct cxl_port *port,
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
struct device *dev = &port->dev;
void __iomem *crb, *hdm;
struct cxl_hdm *cxlhdm;
void __iomem *crb;
int rc;
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
cxlhdm->port = port;
crb = devm_cxl_iomap_block(dev, port->component_reg_phys,
CXL_COMPONENT_REG_BLOCK_SIZE);
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
if (!crb) {
dev_err(dev, "No component registers mapped\n");
return ERR_PTR(-ENXIO);
}
hdm = map_hdm_decoder_regs(port, crb);
if (IS_ERR(hdm))
return ERR_CAST(hdm);
cxlhdm->regs.hdm_decoder = hdm;
rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
iounmap(crb);
if (rc)
return ERR_PTR(rc);
parse_hdm_decoder_caps(cxlhdm);
if (cxlhdm->decoder_count == 0) {
......
@@ -344,6 +344,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
* needed as this is ordered with cdev_add() publishing the device.
*/
cxlmd->cxlds = cxlds;
cxlds->cxlmd = cxlmd;
cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
......
@@ -54,8 +54,7 @@ static int match_add_dports(struct pci_dev *pdev, void *data)
dev_dbg(&port->dev, "failed to find component registers\n");
port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
cxl_regmap_to_base(pdev, &map));
dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
if (IS_ERR(dport)) {
ctx->error = PTR_ERR(dport);
return PTR_ERR(dport);
......
@@ -1292,7 +1292,7 @@ static resource_size_t find_component_registers(struct device *dev)
pdev = to_pci_dev(dev);
cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
return cxl_regmap_to_base(pdev, &map);
return map.resource;
}
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
......
@@ -61,36 +61,48 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
for (cap = 1; cap <= cap_count; cap++) {
void __iomem *register_block;
u32 hdr;
int decoder_cnt;
struct cxl_reg_map *rmap;
u16 cap_id, offset;
u32 length;
u32 length, hdr;
hdr = readl(base + cap * 0x4);
cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
register_block = base + offset;
hdr = readl(register_block);
rmap = NULL;
switch (cap_id) {
case CXL_CM_CAP_CAP_ID_HDM:
case CXL_CM_CAP_CAP_ID_HDM: {
int decoder_cnt;
dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
offset);
hdr = readl(register_block);
decoder_cnt = cxl_hdm_decoder_count(hdr);
length = 0x20 * decoder_cnt + 0x10;
map->hdm_decoder.valid = true;
map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
map->hdm_decoder.size = length;
rmap = &map->hdm_decoder;
break;
}
case CXL_CM_CAP_CAP_ID_RAS:
dev_dbg(dev, "found RAS capability (0x%x)\n",
offset);
length = CXL_RAS_CAPABILITY_LENGTH;
rmap = &map->ras;
break;
default:
dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
offset);
break;
}
if (!rmap)
continue;
rmap->valid = true;
rmap->id = cap_id;
rmap->offset = CXL_CM_OFFSET + offset;
rmap->size = length;
}
}
EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
@@ -119,6 +131,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
for (cap = 1; cap <= cap_count; cap++) {
struct cxl_reg_map *rmap;
u32 offset, length;
u16 cap_id;
@@ -127,28 +140,22 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
offset = readl(base + cap * 0x10 + 0x4);
length = readl(base + cap * 0x10 + 0x8);
rmap = NULL;
switch (cap_id) {
case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
dev_dbg(dev, "found Status capability (0x%x)\n", offset);
map->status.valid = true;
map->status.offset = offset;
map->status.size = length;
rmap = &map->status;
break;
case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
map->mbox.valid = true;
map->mbox.offset = offset;
map->mbox.size = length;
rmap = &map->mbox;
break;
case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
break;
case CXLDEV_CAP_CAP_ID_MEMDEV:
dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
map->memdev.valid = true;
map->memdev.offset = offset;
map->memdev.size = length;
rmap = &map->memdev;
break;
default:
if (cap_id >= 0x8000)
@@ -157,6 +164,13 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
break;
}
if (!rmap)
continue;
rmap->valid = true;
rmap->id = cap_id;
rmap->offset = offset;
rmap->size = length;
}
}
EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
@@ -185,67 +199,65 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
return ret_val;
}
int cxl_map_component_regs(struct pci_dev *pdev,
struct cxl_component_regs *regs,
struct cxl_register_map *map)
int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
struct cxl_register_map *map, unsigned long map_mask)
{
struct device *dev = &pdev->dev;
resource_size_t phys_addr;
resource_size_t length;
phys_addr = pci_resource_start(pdev, map->barno);
phys_addr += map->block_offset;
struct mapinfo {
struct cxl_reg_map *rmap;
void __iomem **addr;
} mapinfo[] = {
{ &map->component_map.hdm_decoder, &regs->hdm_decoder },
{ &map->component_map.ras, &regs->ras },
};
int i;
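/* Map only the blocks that were discovered (valid) and requested via map_mask */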
for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
struct mapinfo *mi = &mapinfo[i];
resource_size_t phys_addr;
resource_size_t length;
phys_addr += map->component_map.hdm_decoder.offset;
length = map->component_map.hdm_decoder.size;
regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
if (!regs->hdm_decoder)
return -ENOMEM;
if (!mi->rmap->valid)
continue;
if (!test_bit(mi->rmap->id, &map_mask))
continue;
phys_addr = map->resource + mi->rmap->offset;
length = mi->rmap->size;
*(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
if (!*(mi->addr))
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
int cxl_map_device_regs(struct pci_dev *pdev,
int cxl_map_device_regs(struct device *dev,
struct cxl_device_regs *regs,
struct cxl_register_map *map)
{
struct device *dev = &pdev->dev;
resource_size_t phys_addr;
phys_addr = pci_resource_start(pdev, map->barno);
phys_addr += map->block_offset;
if (map->device_map.status.valid) {
resource_size_t addr;
resource_size_t phys_addr = map->resource;
struct mapinfo {
struct cxl_reg_map *rmap;
void __iomem **addr;
} mapinfo[] = {
{ &map->device_map.status, &regs->status, },
{ &map->device_map.mbox, &regs->mbox, },
{ &map->device_map.memdev, &regs->memdev, },
};
int i;
for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
struct mapinfo *mi = &mapinfo[i];
resource_size_t length;
addr = phys_addr + map->device_map.status.offset;
length = map->device_map.status.size;
regs->status = devm_cxl_iomap_block(dev, addr, length);
if (!regs->status)
return -ENOMEM;
}
if (map->device_map.mbox.valid) {
resource_size_t addr;
resource_size_t length;
addr = phys_addr + map->device_map.mbox.offset;
length = map->device_map.mbox.size;
regs->mbox = devm_cxl_iomap_block(dev, addr, length);
if (!regs->mbox)
return -ENOMEM;
}
if (!mi->rmap->valid)
continue;
if (map->device_map.memdev.valid) {
resource_size_t addr;
resource_size_t length;
addr = phys_addr + map->device_map.memdev.offset;
length = map->device_map.memdev.size;
regs->memdev = devm_cxl_iomap_block(dev, addr, length);
if (!regs->memdev)
addr = phys_addr + mi->rmap->offset;
length = mi->rmap->size;
*(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
if (!*(mi->addr))
return -ENOMEM;
}
@@ -253,13 +265,24 @@ int cxl_map_device_regs(struct pci_dev *pdev,
}
EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi,
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
map->block_offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
map->barno = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
/* Record the type up front so the warning below reports it accurately */
map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
&pdev->resource[bar], &offset, map->reg_type);
return false;
}
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
}
/**
@@ -279,7 +302,7 @@ int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
u32 regloc_size, regblocks;
int regloc, i;
map->block_offset = U64_MAX;
map->resource = CXL_RESOURCE_NONE;
regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL,
CXL_DVSEC_REG_LOCATOR);
if (!regloc)
@@ -297,13 +320,14 @@ int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
pci_read_config_dword(pdev, regloc, &reg_lo);
pci_read_config_dword(pdev, regloc + 4, &reg_hi);
cxl_decode_regblock(reg_lo, reg_hi, map);
if (!cxl_decode_regblock(pdev, reg_lo, reg_hi, map))
continue;
if (map->reg_type == type)
return 0;
}
map->block_offset = U64_MAX;
map->resource = CXL_RESOURCE_NONE;
return -ENODEV;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);
......
@@ -33,6 +33,7 @@
#define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
#define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)
#define CXL_CM_CAP_CAP_ID_RAS 0x2
#define CXL_CM_CAP_CAP_ID_HDM 0x5
#define CXL_CM_CAP_CAP_HDM_VERSION 1
@@ -123,6 +124,22 @@ static inline int ways_to_cxl(unsigned int ways, u8 *iw)
return 0;
}
/* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */
#define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0
#define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4
#define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8
#define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC
#define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0)
#define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10
#define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0)
#define CXL_RAS_CAP_CONTROL_OFFSET 0x14
#define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0)
#define CXL_RAS_HEADER_LOG_OFFSET 0x18
#define CXL_RAS_CAPABILITY_LENGTH 0x58
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -157,9 +174,11 @@ struct cxl_regs {
/*
* Common set of CXL Component register block base pointers
* @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
* @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure
*/
struct_group_tagged(cxl_component_regs, component,
void __iomem *hdm_decoder;
void __iomem *ras;
);
/*
* Common set of CXL Device register block base pointers
@@ -174,12 +193,14 @@ struct cxl_regs {
struct cxl_reg_map {
bool valid;
int id;
unsigned long offset;
unsigned long size;
};
struct cxl_component_reg_map {
struct cxl_reg_map hdm_decoder;
struct cxl_reg_map ras;
};
struct cxl_device_reg_map {
@@ -191,17 +212,17 @@ struct cxl_device_reg_map {
/**
* struct cxl_register_map - DVSEC harvested register block mapping parameters
* @base: virtual base of the register-block-BAR + @block_offset
* @block_offset: offset to start of register block in @barno
* @resource: physical resource base of the register block
* @max_size: maximum mapping size to perform register search
* @reg_type: see enum cxl_regloc_type
* @barno: PCI BAR number containing the register block
* @component_map: cxl_reg_map for component registers
* @device_map: cxl_reg_maps for device registers
*/
struct cxl_register_map {
void __iomem *base;
u64 block_offset;
resource_size_t resource;
resource_size_t max_size;
u8 reg_type;
u8 barno;
union {
struct cxl_component_reg_map component_map;
struct cxl_device_reg_map device_map;
@@ -212,11 +233,10 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
struct cxl_component_reg_map *map);
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
struct cxl_device_reg_map *map);
int cxl_map_component_regs(struct pci_dev *pdev,
struct cxl_component_regs *regs,
struct cxl_register_map *map);
int cxl_map_device_regs(struct pci_dev *pdev,
struct cxl_device_regs *regs,
int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs,
struct cxl_register_map *map,
unsigned long map_mask);
int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs,
struct cxl_register_map *map);
enum cxl_regloc_type;
......
@@ -199,6 +199,7 @@ struct cxl_endpoint_dvsec_info {
* Currently only memory devices are represented.
*
* @dev: The device associated with this CXL state
* @cxlmd: The device representing the CXL.mem capabilities of @dev
* @regs: Parsed register blocks
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
@@ -232,6 +233,7 @@ struct cxl_endpoint_dvsec_info {
*/
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
struct cxl_regs regs;
int cxl_dvsec;
......
@@ -62,15 +62,6 @@ enum cxl_regloc_type {
CXL_REGLOC_RBI_TYPES
};
static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
struct cxl_register_map *map)
{
if (map->block_offset == U64_MAX)
return CXL_RESOURCE_NONE;
return pci_resource_start(pdev, map->barno) + map->block_offset;
}
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
......
@@ -9,10 +9,13 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
#define CREATE_TRACE_POINTS
#include <trace/events/cxl.h>
/**
* DOC: cxl pci
@@ -276,35 +279,22 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map)
{
void __iomem *addr;
int bar = map->barno;
struct device *dev = &pdev->dev;
resource_size_t offset = map->block_offset;
/* Basic sanity check that BAR is big enough */
if (pci_resource_len(pdev, bar) < offset) {
dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar,
&pdev->resource[bar], &offset);
return -ENXIO;
}
addr = pci_iomap(pdev, bar, 0);
if (!addr) {
map->base = ioremap(map->resource, map->max_size);
if (!map->base) {
dev_err(dev, "failed to map registers\n");
return -ENOMEM;
}
dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n",
bar, &offset);
map->base = addr + map->block_offset;
dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
return 0;
}
static void cxl_unmap_regblock(struct pci_dev *pdev,
struct cxl_register_map *map)
{
pci_iounmap(pdev, map->base - map->block_offset);
iounmap(map->base);
map->base = NULL;
}
@@ -324,6 +314,9 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
return -ENXIO;
}
if (!comp_map->ras.valid)
dev_dbg(dev, "RAS registers not found\n");
dev_dbg(dev, "Set up component registers\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
@@ -347,27 +340,6 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
return 0;
}
static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
struct device *dev = cxlds->dev;
struct pci_dev *pdev = to_pci_dev(dev);
switch (map->reg_type) {
case CXL_REGLOC_RBI_COMPONENT:
cxl_map_component_regs(pdev, &cxlds->regs.component, map);
dev_dbg(dev, "Mapping component registers...\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
dev_dbg(dev, "Probing device registers...\n");
break;
default:
break;
}
return 0;
}
static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map)
{
@@ -442,6 +414,11 @@ static bool is_cxl_restricted(struct pci_dev *pdev)
return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}
static void disable_aer(void *pdev)
{
pci_disable_pcie_error_reporting(pdev);
}
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -463,6 +440,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cxlds = cxl_dev_state_create(&pdev->dev);
if (IS_ERR(cxlds))
return PTR_ERR(cxlds);
pci_set_drvdata(pdev, cxlds);
cxlds->rcd = is_cxl_restricted(pdev);
cxlds->serial = pci_get_dsn(pdev);
@@ -476,7 +454,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
rc = cxl_map_regs(cxlds, &map);
rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map);
if (rc)
return rc;
@@ -489,10 +467,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);
cxlds->component_reg_phys = map.resource;
devm_cxl_pci_create_doe(cxlds);
rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
&map, BIT(CXL_CM_CAP_CAP_ID_RAS));
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
@@ -513,6 +496,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
if (cxlds->regs.ras) {
pci_enable_pcie_error_reporting(pdev);
rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev);
if (rc)
return rc;
}
pci_save_state(pdev);
return rc;
}
@@ -523,10 +514,152 @@ static const struct pci_device_id cxl_mem_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
/* CXL spec rev3.0 8.2.4.16.1 */
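/* Copy the 512-byte RAS header log out of MMIO, one 32-bit word at a time */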
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
void __iomem *addr;
u32 *log_addr;
int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
log_addr = log;
for (i = 0; i < log_u32_size; i++) {
*log_addr = readl(addr);
log_addr++;
addr += sizeof(u32);
}
}
/*
* Log the state of the RAS status registers and prepare them to log the
* next error status. Return true if a reset is needed (i.e. an
* uncorrectable error was recorded).
*/
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
u32 hl[CXL_HEADERLOG_SIZE_U32];
void __iomem *addr;
u32 status;
u32 fe;
if (!cxlds->regs.ras)
return false;
addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
status = le32_to_cpu((__force __le32)readl(addr));
if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
return false;
/* If multiple errors, log header points to first error from ctrl reg */
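/* The First Error Pointer holds a bit position; BIT() converts it to a status mask */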
if (hweight32(status) > 1) {
/* Use a local pointer so @addr keeps targeting the status register for the clear below */
void __iomem *rcc_addr =
cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
fe = BIT(le32_to_cpu((__force __le32)readl(rcc_addr)) &
CXL_RAS_CAP_CONTROL_FE_MASK);
} else {
fe = status;
}
header_log_copy(cxlds, hl);
trace_cxl_aer_uncorrectable_error(dev_name(dev), status, fe, hl);
writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
return true;
}
static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
bool ue;
/*
* A frozen channel indicates an impending reset which is fatal to
* CXL.mem operation, and will likely crash the system. On the off
* chance the situation is recoverable dump the status of the RAS
* capability registers and bounce the active state of the memdev.
*/
ue = cxl_report_and_clear(cxlds);
switch (state) {
case pci_channel_io_normal:
if (ue) {
device_release_driver(dev);
return PCI_ERS_RESULT_NEED_RESET;
}
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
dev_warn(&pdev->dev,
"%s: frozen state error detected, disable CXL.mem\n",
dev_name(dev));
device_release_driver(dev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
dev_warn(&pdev->dev,
"failure state error detected, request disconnect\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
dev_name(dev));
pci_restore_state(pdev);
if (device_attach(dev) <= 0)
return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_RECOVERED;
}
static void cxl_error_resume(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
dev->driver ? "successful" : "failed");
}
static void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
struct cxl_memdev *cxlmd = cxlds->cxlmd;
struct device *dev = &cxlmd->dev;
void __iomem *addr;
u32 status;
if (!cxlds->regs.ras)
return;
addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
status = le32_to_cpu((__force __le32)readl(addr));
if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
trace_cxl_aer_correctable_error(dev_name(dev), status);
}
}
static const struct pci_error_handlers cxl_error_handlers = {
.error_detected = cxl_error_detected,
.slot_reset = cxl_slot_reset,
.resume = cxl_error_resume,
.cor_error_detected = cxl_cor_error_detected,
};
static struct pci_driver cxl_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = cxl_mem_pci_tbl,
.probe = cxl_pci_probe,
.err_handler = &cxl_error_handlers,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
......
@@ -961,8 +961,14 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
if (aer)
pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
info->status);
if (pcie_aer_is_native(dev))
if (pcie_aer_is_native(dev)) {
struct pci_driver *pdrv = dev->driver;
if (pdrv && pdrv->err_handler &&
pdrv->err_handler->cor_error_detected)
pdrv->err_handler->cor_error_detected(dev);
pcie_clear_device_status(dev);
}
} else if (info->severity == AER_NONFATAL)
pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
......
@@ -844,6 +844,9 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
/* Allow device driver to record more details of a correctable error */
void (*cor_error_detected)(struct pci_dev *dev);
};
......
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cxl
#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _CXL_EVENTS_H
#include <linux/tracepoint.h>
#define CXL_HEADERLOG_SIZE SZ_512
#define CXL_HEADERLOG_SIZE_U32 (SZ_512 / sizeof(u32))
#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0)
#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1)
#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2)
#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3)
#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4)
#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5)
#define CXL_RAS_UC_MEM_BE_PARITY BIT(6)
#define CXL_RAS_UC_MEM_DATA_ECC BIT(7)
#define CXL_RAS_UC_REINIT_THRESH BIT(8)
#define CXL_RAS_UC_RSVD_ENCODE BIT(9)
#define CXL_RAS_UC_POISON BIT(10)
#define CXL_RAS_UC_RECV_OVERFLOW BIT(11)
#define CXL_RAS_UC_INTERNAL_ERR BIT(14)
#define CXL_RAS_UC_IDE_TX_ERR BIT(15)
#define CXL_RAS_UC_IDE_RX_ERR BIT(16)
#define show_uc_errs(status) __print_flags(status, " | ", \
{ CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \
{ CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \
{ CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \
{ CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
{ CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \
{ CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \
{ CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \
{ CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \
{ CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
{ CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \
{ CXL_RAS_UC_POISON, "Received Poison From Peer" }, \
{ CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \
{ CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \
{ CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \
{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)
TRACE_EVENT(cxl_aer_uncorrectable_error,
TP_PROTO(const char *dev_name, u32 status, u32 fe, u32 *hl),
TP_ARGS(dev_name, status, fe, hl),
TP_STRUCT__entry(
__string(dev_name, dev_name)
__field(u32, status)
__field(u32, first_error)
__array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__entry->status = status;
__entry->first_error = fe;
/*
* Embed the 512B headerlog data for user app retrieval and
* parsing, but no need to print this in the trace buffer.
*/
memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
),
TP_printk("%s: status: '%s' first_error: '%s'",
__get_str(dev_name),
show_uc_errs(__entry->status),
show_uc_errs(__entry->first_error)
)
);
#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0)
#define CXL_RAS_CE_MEM_DATA_ECC BIT(1)
#define CXL_RAS_CE_CRC_THRESH BIT(2)
#define CXL_RAS_CE_RETRY_THRESH BIT(3)
#define CXL_RAS_CE_CACHE_POISON BIT(4)
#define CXL_RAS_CE_MEM_POISON BIT(5)
#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6)
#define show_ce_errs(status) __print_flags(status, " | ", \
{ CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \
{ CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \
{ CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \
{ CXL_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \
{ CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \
{ CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \
{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)
TRACE_EVENT(cxl_aer_correctable_error,
TP_PROTO(const char *dev_name, u32 status),
TP_ARGS(dev_name, status),
TP_STRUCT__entry(
__string(dev_name, dev_name)
__field(u32, status)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__entry->status = status;
),
TP_printk("%s: status: '%s'",
__get_str(dev_name), show_ce_errs(__entry->status)
)
);
#endif /* _CXL_EVENTS_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cxl
#include <trace/define_trace.h>