Commit 73e93150 authored by Linus Torvalds

Merge tag 'cxl-fixes-6.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Dave Jiang:

 - Fix no cxl_nvd during pmem region auto-assemble

 - Avoid NULL pointer dereference in region lookup

 - Add missing checks to interleave capability

 - Add cxl kdoc fix to address document compilation error

* tag 'cxl-fixes-6.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
  cxl: documentation: add missing files to cxl driver-api
  cxl/region: check interleave capability
  cxl/region: Avoid null pointer dereference in region lookup
  cxl/mem: Fix no cxl_nvd during pmem region auto-assembling
parents cfbc0ffe a0f39d51
@@ -328,6 +328,12 @@ CXL Memory Device
.. kernel-doc:: drivers/cxl/mem.c
:doc: cxl mem
.. kernel-doc:: drivers/cxl/cxlmem.h
:internal:
.. kernel-doc:: drivers/cxl/core/memdev.c
:identifiers:
CXL Port
--------
.. kernel-doc:: drivers/cxl/port.c
@@ -341,6 +347,15 @@ CXL Core
.. kernel-doc:: drivers/cxl/cxl.h
:internal:
.. kernel-doc:: drivers/cxl/core/hdm.c
:doc: cxl core hdm
.. kernel-doc:: drivers/cxl/core/hdm.c
:identifiers:
.. kernel-doc:: drivers/cxl/core/cdat.c
:identifiers:
.. kernel-doc:: drivers/cxl/core/port.c
:doc: cxl core
......
@@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
struct cxl_dport *dport = NULL;
int single_port_map[1];
unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
* Capability checks are moot for passthrough decoders, support
* any and all possibilities.
*/
cxlhdm->interleave_mask = ~0U;
cxlhdm->iw_cap_mask = ~0UL;
cxlsd = cxl_switch_decoder_alloc(port, 1);
if (IS_ERR(cxlsd))
@@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(11, 8);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
cxlhdm->interleave_mask |= GENMASK(14, 12);
cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
cxlhdm->iw_cap_mask |= BIT(16);
}
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
......
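For readers new to the iw_cap_mask field: bit N set in the mask means N-way interleave is supported by the port's decoders. Below is a minimal userspace sketch (not part of this series) of how parse_hdm_decoder_caps() derives the mask from the HDM decoder capability register; the macro names and the sample capability value are hypothetical stand-ins for the driver's register parsing.

/* build_iw_cap_mask.c: illustrative only, not kernel code */
#include <stdint.h>
#include <stdio.h>

/* stand-ins for CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY / _16_WAY (bits 11 and 12) */
#define HDM_CAP_IW_3_6_12_WAY	(1u << 11)
#define HDM_CAP_IW_16_WAY	(1u << 12)

static unsigned long iw_cap_mask_from_hdm_cap(uint32_t hdm_cap)
{
	/* 1-, 2-, 4- and 8-way interleave are always advertised */
	unsigned long mask = (1ul << 1) | (1ul << 2) | (1ul << 4) | (1ul << 8);

	if (hdm_cap & HDM_CAP_IW_3_6_12_WAY)
		mask |= (1ul << 3) | (1ul << 6) | (1ul << 12);
	if (hdm_cap & HDM_CAP_IW_16_WAY)
		mask |= 1ul << 16;
	return mask;
}

int main(void)
{
	/* hypothetical capability value: 3/6/12-way supported, 16-way not */
	unsigned long mask = iw_cap_mask_from_hdm_cap(HDM_CAP_IW_3_6_12_WAY);

	for (int iw = 1; iw <= 16; iw++)
		if (mask & (1ul << iw))
			printf("%2d-way interleave supported\n", iw);
	return 0;
}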
@@ -62,10 +62,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
return is_cxl_nvdimm_bridge(dev);
}
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
/**
* cxl_find_nvdimm_bridge() - find a bridge device relative to a port
* @port: any descendant port of an nvdimm-bridge associated
* root-cxl-port
*/
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
{
struct cxl_root *cxl_root __free(put_cxl_root) =
find_cxl_root(cxlmd->endpoint);
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
struct device *dev;
if (!cxl_root)
@@ -242,18 +246,20 @@ static void cxlmd_release_nvdimm(void *_cxlmd)
/**
* devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
* @parent_port: parent port for the (to be added) @cxlmd endpoint port
* @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
*
* Return: 0 on success, negative error code on failure.
*/
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
int devm_cxl_add_nvdimm(struct cxl_port *parent_port,
struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
struct device *dev;
int rc;
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
cxl_nvb = cxl_find_nvdimm_bridge(parent_port);
if (!cxl_nvb)
return -ENODEV;
......
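A side note on the __free(put_cxl_root) declarations above: this is the kernel's scope-based cleanup idiom from linux/cleanup.h, where the reference returned by find_cxl_root() is dropped automatically when the variable goes out of scope. The following is a simplified userspace analogue; the kernel pairs __free() with DEFINE_FREE(), so the bare cleanup-attribute macro here is only an approximation.

/* cleanup_demo.c: simplified userspace analogue, not the kernel macro */
#include <stdio.h>
#include <stdlib.h>

/* crude stand-in for the linux/cleanup.h helpers */
#define __free(fn) __attribute__((cleanup(fn)))

static void free_charp(char **p)
{
	free(*p);
	printf("buffer released automatically at scope exit\n");
}

int main(void)
{
	/* like 'struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);' */
	char *buf __free(free_charp) = malloc(32);

	if (!buf)
		return 1;	/* cleanup still runs; free(NULL) is a no-op */

	snprintf(buf, 32, "no explicit free needed");
	puts(buf);
	return 0;	/* free_charp(&buf) is invoked here by the compiler */
}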
@@ -1101,6 +1101,26 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
cxld = cxl_rr->decoder;
/*
* the number of targets should not exceed the target_count
* of the decoder
*/
if (is_switch_decoder(&cxld->dev)) {
struct cxl_switch_decoder *cxlsd;
cxlsd = to_cxl_switch_decoder(&cxld->dev);
if (cxl_rr->nr_targets > cxlsd->nr_targets) {
dev_dbg(&cxlr->dev,
"%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxld->dev), dev_name(&cxlmd->dev),
dev_name(&cxled->cxld.dev), pos,
cxlsd->nr_targets);
rc = -ENXIO;
goto out_erase;
}
}
rc = cxl_rr_ep_add(cxl_rr, cxled);
if (rc) {
dev_dbg(&cxlr->dev,
@@ -1210,6 +1230,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
return 0;
}
static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
unsigned int interleave_mask;
u8 eiw;
u16 eig;
int high_pos, low_pos;
if (!test_bit(iw, &cxlhdm->iw_cap_mask))
return -ENXIO;
/*
* Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
* if eiw < 8:
* DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
* DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
*
* when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
* interleave bits are none.
*
* if eiw >= 8:
* DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
* DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
*
* when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
* interleave bits are none.
*/
ways_to_eiw(iw, &eiw);
if (eiw == 0 || eiw == 8)
return 0;
granularity_to_eig(ig, &eig);
if (eiw > 8)
high_pos = eiw + eig - 1;
else
high_pos = eiw + eig + 7;
low_pos = eig + 8;
interleave_mask = GENMASK(high_pos, low_pos);
if (interleave_mask & ~cxlhdm->interleave_mask)
return -ENXIO;
return 0;
}
static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
@@ -1360,6 +1424,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return -ENXIO;
}
} else {
rc = check_interleave_cap(cxld, iw, ig);
if (rc) {
dev_dbg(&cxlr->dev,
"%s:%s iw: %d ig: %d is not supported\n",
dev_name(port->uport_dev),
dev_name(&port->dev), iw, ig);
return rc;
}
cxld->interleave_ways = iw;
cxld->interleave_granularity = ig;
cxld->hpa_range = (struct range) {
@@ -1796,6 +1869,15 @@ static int cxl_region_attach(struct cxl_region *cxlr,
struct cxl_dport *dport;
int rc = -ENXIO;
rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
p->interleave_granularity);
if (rc) {
dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
dev_name(&cxled->cxld.dev), p->interleave_ways,
p->interleave_granularity);
return rc;
}
if (cxled->mode != cxlr->mode) {
dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
@@ -2688,22 +2770,33 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
struct cxl_dpa_to_region_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
struct cxl_region *cxlr;
u64 dpa = ctx->dpa;
if (!is_endpoint_decoder(dev))
return 0;
cxled = to_cxl_endpoint_decoder(dev);
if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
return 0;
dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
dev_name(&cxled->cxld.region->dev));
/*
* Stop the region search (return 1) when an endpoint mapping is
* found. The region may not be fully constructed so offering
* the cxlr in the context structure is not guaranteed.
*/
cxlr = cxled->cxld.region;
if (cxlr)
dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
dev_name(&cxlr->dev));
else
dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
dev_name(dev));
ctx->cxlr = cxled->cxld.region;
ctx->cxlr = cxlr;
return 1;
}
@@ -2847,7 +2940,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
* bridge for one device is the same for all.
*/
if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
if (!cxl_nvb)
return -ENODEV;
cxlr->cxl_nvb = cxl_nvb;
......
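To make the bit-position arithmetic in check_interleave_cap() concrete, here is a self-contained userspace sketch. The ways_to_eiw() and granularity_to_eig() helpers below are simplified re-implementations of the driver's encoders, and the decoder capability mask is a made-up value; for iw=4 and ig=1024 the math says HPA offset bits [11:10] select the interleave target, which a decoder that advertises interleave on address bits 11:8 can honor.

/* interleave_bits.c: worked example of the check_interleave_cap() math, illustrative only */
#include <stdio.h>

/* GENMASK(h, l) for 32-bit masks, as in the kernel */
#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

/* simplified ways_to_eiw(): CXL encodes 1/2/4/8/16 ways as 0..4 and 3/6/12 ways as 8..10 */
static int ways_to_eiw(int iw, unsigned char *eiw)
{
	switch (iw) {
	case 1: *eiw = 0; return 0;
	case 2: *eiw = 1; return 0;
	case 4: *eiw = 2; return 0;
	case 8: *eiw = 3; return 0;
	case 16: *eiw = 4; return 0;
	case 3: *eiw = 8; return 0;
	case 6: *eiw = 9; return 0;
	case 12: *eiw = 10; return 0;
	}
	return -1;
}

/* simplified granularity_to_eig(): eig = log2(ig) - 8 for ig in 256..16384 */
static int granularity_to_eig(int ig, unsigned short *eig)
{
	unsigned short e;

	if (ig < 256 || ig > 16384 || (ig & (ig - 1)))
		return -1;
	for (e = 0; (256 << e) < ig; e++)
		;
	*eig = e;
	return 0;
}

int main(void)
{
	/* hypothetical decoder capability: interleave on HPA address bits 11:8 only */
	unsigned int decoder_interleave_mask = GENMASK(11, 8);
	int iw = 4, ig = 1024;
	unsigned char eiw;
	unsigned short eig;
	int high_pos, low_pos;
	unsigned int required;

	ways_to_eiw(iw, &eiw);		/* eiw = 2 */
	granularity_to_eig(ig, &eig);	/* eig = 2 */

	/*
	 * Same positions as check_interleave_cap(); the driver short-circuits
	 * eiw == 0 (1-way) and eiw == 8 (3-way) before reaching this math.
	 */
	if (eiw > 8)
		high_pos = eiw + eig - 1;
	else
		high_pos = eiw + eig + 7;
	low_pos = eig + 8;
	required = GENMASK(high_pos, low_pos);

	printf("iw=%d ig=%d -> HPA offset bits [%d:%d] pick the target (mask 0x%x)\n",
	       iw, ig, high_pos, low_pos, required);
	printf("decoder %s this interleave setup\n",
	       (required & ~decoder_interleave_mask) ? "cannot support" : "supports");
	return 0;
}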
@@ -47,6 +47,8 @@ extern const struct nvdimm_security_ops *cxl_security_ops;
#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
#define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
#define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
#define CXL_HDM_DECODER_CTRL_OFFSET 0x4
#define CXL_HDM_DECODER_ENABLE BIT(1)
#define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
@@ -855,8 +857,8 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
......
@@ -395,9 +395,9 @@ enum cxl_devtype {
/**
* struct cxl_dpa_perf - DPA performance property entry
* @dpa_range - range for DPA address
* @coord - QoS performance data (i.e. latency, bandwidth)
* @qos_class - QoS Class cookies
* @dpa_range: range for DPA address
* @coord: QoS performance data (i.e. latency, bandwidth)
* @qos_class: QoS Class cookies
*/
struct cxl_dpa_perf {
struct range dpa_range;
@@ -464,13 +464,14 @@ struct cxl_dev_state {
* @active_persistent_bytes: sum of hard + soft persistent
* @next_volatile_bytes: volatile capacity change pending device reset
* @next_persistent_bytes: persistent capacity change pending device reset
* @ram_perf: performance data entry matched to RAM partition
* @pmem_perf: performance data entry matched to PMEM partition
* @event: event log driver state
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_wait: RCU wait for mbox send to complete
* @mbox_send: @dev specific transport for transmitting mailbox commands
* @ram_perf: performance data entry matched to RAM partition
* @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -851,11 +852,21 @@ static inline void cxl_mem_active_dec(void)
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
/**
* struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities
* @regs: mapped registers, see devm_cxl_setup_hdm()
* @decoder_count: number of decoders for this port
* @target_count: for switch decoders, max downstream port targets
* @interleave_mask: interleave granularity capability, see check_interleave_cap()
* @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap()
* @port: mapped cxl_port, see devm_cxl_setup_hdm()
*/
struct cxl_hdm {
struct cxl_component_regs regs;
unsigned int decoder_count;
unsigned int target_count;
unsigned int interleave_mask;
unsigned long iw_cap_mask;
struct cxl_port *port;
};
......
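The cxlmem.h hunks above are kernel-doc repairs: struct members must be documented as "@member: description" (colon, not dash), otherwise scripts/kernel-doc treats the member as undescribed and warns when the header is pulled into the driver-api documentation build. A hypothetical, minimal illustration of the accepted form follows; the struct and its members are invented for this example only.

/* kdoc_example.c: hypothetical struct showing the member syntax kernel-doc accepts */

/**
 * struct example_perf - example performance entry (illustrative only)
 * @start: first address covered by the entry
 * @end: last address covered by the entry
 * @qos_class: QoS class cookie
 *
 * Note the "@name: text" form; "@name - text" leaves the member
 * undescribed and scripts/kernel-doc emits a warning for it.
 */
struct example_perf {
	unsigned long long start;
	unsigned long long end;
	int qos_class;
};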
@@ -152,6 +152,15 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
if (rc) {
if (rc == -ENODEV)
dev_info(dev, "PMEM disabled by platform\n");
return rc;
}
}
if (dport->rch)
endpoint_parent = parent_port->uport_dev;
else
@@ -174,14 +183,6 @@ static int cxl_mem_probe(struct device *dev)
if (rc)
return rc;
if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(cxlmd);
if (rc == -ENODEV)
dev_info(dev, "PMEM disabled by platform\n");
else
return rc;
}
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
......
@@ -630,11 +630,15 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
struct device *dev = &port->dev;
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
cxlhdm->port = port;
cxlhdm->interleave_mask = ~0U;
cxlhdm->iw_cap_mask = ~0UL;
dev_set_drvdata(dev, cxlhdm);
return cxlhdm;
}
......