Commit 362f533a authored by Linus Torvalds

Merge tag 'cxl-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull CXL (Compute Express Link) updates from Dan Williams:
 "The highlight is initial support for CXL memory hotplug. The static
  NUMA node (ACPI SRAT Physical Address to Proximity Domain) information
  known to platform firmware is extended to support the potential
  performance-class / memory-target nodes dynamically created from
  available CXL memory device capacity.

  New unit test infrastructure is added for validating health
  information payloads.

  Fixes to module reload stress and stack usage from exposure in -next
  are included. A symbol rename and some other miscellaneous fixups are
  included as well.

  Summary:

   - Rework ACPI sub-table infrastructure to optionally be used outside
     of __init scenarios and use it for CEDT.CFMWS sub-table parsing.

   - Add support for extending num_possible_nodes by the potential
     hotplug CXL memory ranges

   - Extend tools/testing/cxl with mock memory device health information

   - Fix a module-reload workqueue race

   - Fix excessive stack-frame usage

   - Rename the driver context data structure from "cxl_mem" since that
     name collides with a proposed driver name

   - Use EXPORT_SYMBOL_NS_GPL instead of -DDEFAULT_SYMBOL_NAMESPACE at
     build time"

* tag 'cxl-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
  cxl/core: Remove cxld_const_init in cxl_decoder_alloc()
  cxl/pmem: Fix module reload vs workqueue state
  ACPI: NUMA: Add a node and memblk for each CFMWS not in SRAT
  cxl/test: Mock acpi_table_parse_cedt()
  cxl/acpi: Convert CFMWS parsing to ACPI sub-table helpers
  ACPI: Add a context argument for table parsing handlers
  ACPI: Teach ACPI table parsing about the CEDT header format
  ACPI: Keep sub-table parsing infrastructure available for modules
  tools/testing/cxl: add mock output for the GET_HEALTH_INFO command
  cxl/memdev: Remove unused cxlmd field
  cxl/core: Convert to EXPORT_SYMBOL_NS_GPL
  cxl/memdev: Change cxl_mem to a more descriptive name
  cxl/mbox: Remove bad comment
  cxl/pmem: Fix reference counting for delayed work
parents 3acbdbf4 be185c29
......@@ -59,6 +59,9 @@ config ACPI_SYSTEM_POWER_STATES_SUPPORT
config ACPI_CCA_REQUIRED
bool
config ACPI_TABLE_LIB
bool
config ACPI_DEBUGGER
bool "AML debugger interface"
select ACPI_DEBUG
......
......@@ -297,6 +297,47 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
out_err:
return -EINVAL;
}
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
void *arg, const unsigned long table_end)
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
u64 start, end;
int node;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
/* Skip if the SRAT already described the NUMA details for this HPA */
node = phys_to_target_node(start);
if (node != NUMA_NO_NODE)
return 0;
node = acpi_map_pxm_to_node(*fake_pxm);
if (node == NUMA_NO_NODE) {
pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
return -EINVAL;
}
if (numa_add_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
}
/* Set the next available fake_pxm value */
(*fake_pxm)++;
return 0;
}
#else
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
void *arg, const unsigned long table_end)
{
return 0;
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
static int __init acpi_parse_slit(struct acpi_table_header *table)
......@@ -441,7 +482,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
int cnt = 0;
int i, fake_pxm, cnt = 0;
if (acpi_disabled)
return -EINVAL;
......@@ -477,6 +518,22 @@ int __init acpi_numa_init(void)
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
/*
* CXL Fixed Memory Window Structures (CFMWS) must be parsed
* after the SRAT. Create NUMA Nodes for CXL memory ranges that
* are defined in the CFMWS and not already defined in the SRAT.
* Initialize a fake_pxm as the first available PXM to emulate.
*/
/* fake_pxm is the next unused PXM value after SRAT parsing */
for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
if (node_to_pxm_map[i] > fake_pxm)
fake_pxm = node_to_pxm_map[i];
}
fake_pxm++;
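/*
 * Example: with SRAT-described PXMs 0 and 1, fake_pxm starts at 2 and
 * each CFMWS window that SRAT did not cover consumes the next value
 * (2, 3, ...) in acpi_parse_cfmws() above.
 */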
acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
&fake_pxm);
if (cnt < 0)
return cnt;
else if (!parsed_numa_memblks)
......
......@@ -35,12 +35,13 @@ static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
static int acpi_apic_instance __initdata;
static int acpi_apic_instance __initdata_or_acpilib;
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
ACPI_SUBTABLE_CEDT,
};
struct acpi_subtable_entry {
......@@ -52,7 +53,7 @@ struct acpi_subtable_entry {
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
*/
static bool acpi_verify_table_checksum __initdata = false;
static bool acpi_verify_table_checksum __initdata_or_acpilib = false;
void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
......@@ -216,7 +217,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
static unsigned long __init
static unsigned long __init_or_acpilib
acpi_get_entry_type(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
......@@ -226,11 +227,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return entry->hdr->hmat.type;
case ACPI_SUBTABLE_PRMT:
return 0;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.type;
}
return 0;
}
static unsigned long __init
static unsigned long __init_or_acpilib
acpi_get_entry_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
......@@ -240,11 +243,13 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->hmat.length;
case ACPI_SUBTABLE_PRMT:
return entry->hdr->prmt.length;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.length;
}
return 0;
}
static unsigned long __init
static unsigned long __init_or_acpilib
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
......@@ -254,20 +259,40 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->hmat);
case ACPI_SUBTABLE_PRMT:
return sizeof(entry->hdr->prmt);
case ACPI_SUBTABLE_CEDT:
return sizeof(entry->hdr->cedt);
}
return 0;
}
static enum acpi_subtable_type __init
static enum acpi_subtable_type __init_or_acpilib
acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
return ACPI_SUBTABLE_HMAT;
if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
return ACPI_SUBTABLE_PRMT;
if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
return ACPI_SUBTABLE_CEDT;
return ACPI_SUBTABLE_COMMON;
}
static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
{
return proc->handler || proc->handler_arg;
}
static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
union acpi_subtable_headers *hdr,
unsigned long end)
{
if (proc->handler)
return proc->handler(hdr, end);
if (proc->handler_arg)
return proc->handler_arg(hdr, proc->arg, end);
return -EINVAL;
}
/**
* acpi_parse_entries_array - for each proc_num find a suitable subtable
*
......@@ -291,10 +316,10 @@ acpi_get_subtable_type(char *id)
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
static int __init_or_acpilib acpi_parse_entries_array(
char *id, unsigned long table_size,
struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
struct acpi_subtable_entry entry;
unsigned long table_end, subtable_len, entry_len;
......@@ -318,8 +343,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
if (!proc[i].handler ||
(!errs && proc[i].handler(entry.hdr, table_end))) {
if (!has_handler(&proc[i]) ||
(!errs &&
call_handler(&proc[i], entry.hdr, table_end))) {
errs++;
continue;
}
......@@ -352,10 +378,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
return errs ? -EINVAL : count;
}
int __init acpi_table_parse_entries_array(char *id,
unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
int __init_or_acpilib acpi_table_parse_entries_array(
char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
int count;
......@@ -386,21 +411,41 @@ int __init acpi_table_parse_entries_array(char *id,
return count;
}
int __init acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries)
static int __init_or_acpilib __acpi_table_parse_entries(
char *id, unsigned long table_size, int entry_id,
acpi_tbl_entry_handler handler, acpi_tbl_entry_handler_arg handler_arg,
void *arg, unsigned int max_entries)
{
struct acpi_subtable_proc proc = {
.id = entry_id,
.handler = handler,
.handler_arg = handler_arg,
.arg = arg,
};
return acpi_table_parse_entries_array(id, table_size, &proc, 1,
max_entries);
}
int __init_or_acpilib
acpi_table_parse_cedt(enum acpi_cedt_type id,
acpi_tbl_entry_handler_arg handler_arg, void *arg)
{
return __acpi_table_parse_entries(ACPI_SIG_CEDT,
sizeof(struct acpi_table_cedt), id,
NULL, handler_arg, arg, 0);
}
EXPORT_SYMBOL_ACPI_LIB(acpi_table_parse_cedt);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries)
{
return __acpi_table_parse_entries(id, table_size, entry_id, handler,
NULL, NULL, max_entries);
}
int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
......
......@@ -51,6 +51,7 @@ config CXL_ACPI
tristate "CXL ACPI: Platform Support"
depends on ACPI
default CXL_BUS
select ACPI_TABLE_LIB
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
......
......@@ -8,8 +8,6 @@
#include <linux/pci.h>
#include "cxl.h"
static struct acpi_table_header *acpi_cedt;
/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
......@@ -74,64 +72,47 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return 0;
}
static void cxl_add_cfmws_decoders(struct device *dev,
struct cxl_port *root_port)
struct cxl_cfmws_context {
struct device *dev;
struct cxl_port *root_port;
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
acpi_size len, cur = 0;
void *cedt_subtable;
int rc;
len = acpi_cedt->length - sizeof(*acpi_cedt);
cedt_subtable = acpi_cedt + 1;
while (cur < len) {
struct acpi_cedt_header *c = cedt_subtable + cur;
int i;
if (c->type != ACPI_CEDT_TYPE_CFMWS) {
cur += c->length;
continue;
}
int rc, i;
cfmws = cedt_subtable + cur;
if (cfmws->header.length < sizeof(*cfmws)) {
dev_warn_once(dev,
"CFMWS entry skipped:invalid length:%u\n",
cfmws->header.length);
cur += c->length;
continue;
}
cfmws = (struct acpi_cedt_cfmws *) header;
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa, cfmws->base_hpa +
cfmws->window_size - 1);
cur += c->length;
continue;
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
return 0;
}
for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
target_map[i] = cfmws->interleave_targets[i];
cxld = cxl_decoder_alloc(root_port,
CFMWS_INTERLEAVE_WAYS(cfmws));
cxld = cxl_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
if (IS_ERR(cxld))
goto next;
return 0;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
cxld->target_type = CXL_DECODER_EXPANDER;
cxld->range = (struct range) {
cxld->range = (struct range){
.start = cfmws->base_hpa,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
cxld->interleave_granularity =
CFMWS_INTERLEAVE_GRANULARITY(cfmws);
cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
rc = cxl_decoder_add(cxld, target_map);
if (rc)
......@@ -140,68 +121,15 @@ static void cxl_add_cfmws_decoders(struct device *dev,
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
cfmws->base_hpa, cfmws->base_hpa +
cfmws->window_size - 1);
goto next;
}
dev_dbg(dev, "add: %s range %#llx-%#llx\n",
dev_name(&cxld->dev), cfmws->base_hpa,
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
next:
cur += c->length;
}
}
static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
{
struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
acpi_size len, cur = 0;
void *cedt_subtable;
len = acpi_cedt->length - sizeof(*acpi_cedt);
cedt_subtable = acpi_cedt + 1;
while (cur < len) {
struct acpi_cedt_header *c = cedt_subtable + cur;
if (c->type != ACPI_CEDT_TYPE_CHBS) {
cur += c->length;
continue;
}
chbs = cedt_subtable + cur;
if (chbs->header.length < sizeof(*chbs)) {
dev_warn_once(dev,
"CHBS entry skipped: invalid length:%u\n",
chbs->header.length);
cur += c->length;
continue;
}
if (chbs->uid != uid) {
cur += c->length;
continue;
}
if (chbs_match) {
dev_warn_once(dev,
"CHBS entry skipped: duplicate UID:%u\n",
uid);
cur += c->length;
continue;
}
chbs_match = chbs;
cur += c->length;
return 0;
}
dev_dbg(dev, "add: %s node: %d range %#llx-%#llx\n",
dev_name(&cxld->dev), phys_to_target_node(cxld->range.start),
cfmws->base_hpa, cfmws->base_hpa + cfmws->window_size - 1);
return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
}
static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
{
return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
return 0;
}
__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
......@@ -355,12 +283,36 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return rc;
}
struct cxl_chbs_context {
struct device *dev;
unsigned long long uid;
resource_size_t chbcr;
};
static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct cxl_chbs_context *ctx = arg;
struct acpi_cedt_chbs *chbs;
if (ctx->chbcr)
return 0;
chbs = (struct acpi_cedt_chbs *) header;
if (ctx->uid != chbs->uid)
return 0;
ctx->chbcr = chbs->base;
return 0;
}
static int add_host_bridge_dport(struct device *match, void *arg)
{
int rc;
acpi_status status;
unsigned long long uid;
struct acpi_cedt_chbs *chbs;
struct cxl_chbs_context ctx;
struct cxl_port *root_port = arg;
struct device *host = root_port->dev.parent;
struct acpi_device *bridge = to_cxl_host_bridge(host, match);
......@@ -376,14 +328,19 @@ static int add_host_bridge_dport(struct device *match, void *arg)
return -ENODEV;
}
chbs = cxl_acpi_match_chbs(host, uid);
if (IS_ERR(chbs)) {
ctx = (struct cxl_chbs_context) {
.dev = host,
.uid = uid,
};
acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);
if (ctx.chbcr == 0) {
dev_warn(host, "No CHBS found for Host Bridge: %s\n",
dev_name(match));
return 0;
}
rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
rc = cxl_add_dport(root_port, match, uid, ctx.chbcr);
if (rc) {
dev_err(host, "failed to add downstream port: %s\n",
dev_name(match));
......@@ -417,40 +374,29 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
return 1;
}
static u32 cedt_instance(struct platform_device *pdev)
{
const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);
if (native_acpi0017 && *native_acpi0017)
return 0;
/* for cxl_test request a non-canonical instance */
return U32_MAX;
}
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
acpi_status status;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
struct cxl_cfmws_context ctx;
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
if (ACPI_FAILURE(status))
return -ENXIO;
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport);
if (rc)
goto out;
if (rc < 0)
return rc;
cxl_add_cfmws_decoders(host, root_port);
ctx = (struct cxl_cfmws_context) {
.dev = host,
.root_port = root_port,
};
acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
......@@ -458,24 +404,20 @@ static int cxl_acpi_probe(struct platform_device *pdev)
*/
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_uport);
if (rc)
goto out;
if (rc < 0)
return rc;
if (IS_ENABLED(CONFIG_CXL_PMEM))
rc = device_for_each_child(&root_port->dev, root_port,
add_root_nvdimm_bridge);
out:
acpi_put_table(acpi_cedt);
if (rc < 0)
return rc;
return 0;
}
static bool native_acpi0017 = true;
static const struct acpi_device_id cxl_acpi_ids[] = {
{ "ACPI0017", (unsigned long) &native_acpi0017 },
{ "ACPI0017" },
{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
......@@ -491,3 +433,4 @@ static struct platform_driver cxl_acpi_driver = {
module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(ACPI);
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CXL_BUS) += cxl_core.o
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
ccflags-y += -I$(srctree)/drivers/cxl
cxl_core-y := bus.o
cxl_core-y += pmem.o
cxl_core-y += regs.o
......
......@@ -200,7 +200,7 @@ bool is_root_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_GPL(is_root_decoder);
EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
......@@ -209,7 +209,7 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_decoder);
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
static void cxl_dport_release(struct cxl_dport *dport)
{
......@@ -376,7 +376,7 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
put_device(dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_port);
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
......@@ -451,7 +451,7 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
cxl_dport_release(dport);
return rc;
}
EXPORT_SYMBOL_GPL(cxl_add_dport);
EXPORT_SYMBOL_NS_GPL(cxl_add_dport, CXL);
static int decoder_populate_targets(struct cxl_decoder *cxld,
struct cxl_port *port, int *target_map)
......@@ -485,9 +485,7 @@ static int decoder_populate_targets(struct cxl_decoder *cxld,
struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
{
struct cxl_decoder *cxld, cxld_const_init = {
.nr_targets = nr_targets,
};
struct cxl_decoder *cxld;
struct device *dev;
int rc = 0;
......@@ -497,13 +495,13 @@ struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
if (!cxld)
return ERR_PTR(-ENOMEM);
memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
goto err;
cxld->id = rc;
cxld->nr_targets = nr_targets;
dev = &cxld->dev;
device_initialize(dev);
device_set_pm_not_required(dev);
......@@ -521,7 +519,7 @@ struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
kfree(cxld);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_decoder_alloc);
EXPORT_SYMBOL_NS_GPL(cxl_decoder_alloc, CXL);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
......@@ -550,7 +548,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
return device_add(dev);
}
EXPORT_SYMBOL_GPL(cxl_decoder_add);
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
......@@ -561,7 +559,7 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);
EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
/**
* __cxl_driver_register - register a driver for the cxl bus
......@@ -594,13 +592,13 @@ int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(__cxl_driver_register);
EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(cxl_driver_unregister);
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
static int cxl_device_id(struct device *dev)
{
......@@ -642,7 +640,7 @@ struct bus_type cxl_bus_type = {
.probe = cxl_bus_probe,
.remove = cxl_bus_remove,
};
EXPORT_SYMBOL_GPL(cxl_bus_type);
EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
static __init int cxl_core_init(void)
{
......
......@@ -37,9 +37,9 @@ static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
......@@ -47,9 +47,9 @@ static ssize_t payload_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
......@@ -57,9 +57,9 @@ static ssize_t label_storage_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
......@@ -67,8 +67,8 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->ram_range);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
unsigned long long len = range_len(&cxlds->ram_range);
return sysfs_emit(buf, "%#llx\n", len);
}
......@@ -80,8 +80,8 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->pmem_range);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
unsigned long long len = range_len(&cxlds->pmem_range);
return sysfs_emit(buf, "%#llx\n", len);
}
......@@ -136,42 +136,42 @@ static const struct device_type cxl_memdev_type = {
/**
* set_exclusive_cxl_commands() - atomically disable user cxl commands
* @cxlm: cxl_mem instance to modify
* @cxlds: The device state to operate on
* @cmds: bitmap of commands to mark exclusive
*
* Grab the cxl_memdev_rwsem in write mode to flush in-flight
* invocations of the ioctl path and then disable future execution of
* commands with the command ids set in @cmds.
*/
void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
/**
* clear_exclusive_cxl_commands() - atomically enable user cxl commands
* @cxlm: cxl_mem instance to modify
* @cxlds: The device state to modify
* @cmds: bitmap of commands to mark available for userspace
*/
void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands);
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
down_write(&cxl_memdev_rwsem);
cxlmd->cxlm = NULL;
cxlmd->cxlds = NULL;
up_write(&cxl_memdev_rwsem);
}
......@@ -185,7 +185,7 @@ static void cxl_memdev_unregister(void *_cxlmd)
put_device(dev);
}
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
const struct file_operations *fops)
{
struct cxl_memdev *cxlmd;
......@@ -204,7 +204,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
dev = &cxlmd->dev;
device_initialize(dev);
dev->parent = cxlm->dev;
dev->parent = cxlds->dev;
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
......@@ -239,7 +239,7 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
int rc = -ENXIO;
down_read(&cxl_memdev_rwsem);
if (cxlmd->cxlm)
if (cxlmd->cxlds)
rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
up_read(&cxl_memdev_rwsem);
......@@ -276,15 +276,14 @@ static const struct file_operations cxl_memdev_fops = {
.llseek = noop_llseek,
};
struct cxl_memdev *
devm_cxl_add_memdev(struct cxl_mem *cxlm)
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd;
struct device *dev;
struct cdev *cdev;
int rc;
cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops);
cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
if (IS_ERR(cxlmd))
return cxlmd;
......@@ -297,14 +296,14 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm)
* Activate ioctl operations, no cxl_memdev_rwsem manipulation
* needed as this is ordered with cdev_add() publishing the device.
*/
cxlmd->cxlm = cxlm;
cxlmd->cxlds = cxlds;
cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
goto err;
rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
if (rc)
return ERR_PTR(rc);
return cxlmd;
......@@ -318,7 +317,7 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm)
put_device(dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
__init int cxl_memdev_init(void)
{
......
......@@ -49,12 +49,18 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
__mock int match_nvdimm_bridge(struct device *dev, const void *data)
bool is_cxl_nvdimm_bridge(struct device *dev)
{
return dev->type == &cxl_nvdimm_bridge_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
__mock int match_nvdimm_bridge(struct device *dev, const void *data)
{
return is_cxl_nvdimm_bridge(dev);
}
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
{
......@@ -65,7 +71,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
return NULL;
return to_cxl_nvdimm_bridge(dev);
}
EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge);
EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port *port)
......@@ -167,7 +173,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
put_device(dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
static void cxl_nvdimm_release(struct device *dev)
{
......@@ -191,7 +197,7 @@ bool is_cxl_nvdimm(struct device *dev)
{
return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
......@@ -200,7 +206,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
......@@ -262,4 +268,4 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
put_device(dev);
return rc;
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
......@@ -90,7 +90,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
}
}
}
EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
/**
* cxl_probe_device_regs() - Detect CXL Device register blocks
......@@ -156,7 +156,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
}
}
}
EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
static void __iomem *devm_cxl_iomap_block(struct device *dev,
resource_size_t addr,
......@@ -199,7 +199,7 @@ int cxl_map_component_regs(struct pci_dev *pdev,
return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_component_regs);
EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
int cxl_map_device_regs(struct pci_dev *pdev,
struct cxl_device_regs *regs,
......@@ -246,4 +246,4 @@ int cxl_map_device_regs(struct pci_dev *pdev,
return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_device_regs);
EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
......@@ -191,11 +191,18 @@ struct cxl_decoder {
int interleave_granularity;
enum cxl_decoder_type target_type;
unsigned long flags;
const int nr_targets;
int nr_targets;
struct cxl_dport *target[];
};
/**
* enum cxl_nvdimm_brige_state - state machine for managing bus rescans
* @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
* @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
* @CXL_NVB_ONLINE: Target state after successful ->probe()
* @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
*/
enum cxl_nvdimm_brige_state {
CXL_NVB_NEW,
CXL_NVB_DEAD,
......@@ -308,6 +315,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
......
......@@ -33,13 +33,13 @@
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
* @cxlm: pointer to the parent device driver data
* @cxlds: The device state backing this device
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
struct cxl_mem *cxlm;
struct cxl_dev_state *cxlds;
int id;
};
......@@ -48,7 +48,7 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
/**
* struct cxl_mbox_cmd - A command to be submitted to hardware.
......@@ -90,9 +90,13 @@ struct cxl_mbox_cmd {
#define CXL_CAPACITY_MULTIPLIER SZ_256M
/**
* struct cxl_mem - A CXL memory device
* @dev: The device associated with this CXL device.
* @cxlmd: Logical memory device chardev / interface
* struct cxl_dev_state - The driver device state
*
* cxl_dev_state represents the CXL driver/device state. It provides an
* interface to mailbox commands as well as some cached data about the device.
* Currently only memory devices are represented.
*
* @dev: The device associated with this CXL state
* @regs: Parsed register blocks
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
......@@ -117,9 +121,8 @@ struct cxl_mbox_cmd {
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
struct cxl_mem {
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
struct cxl_regs regs;
......@@ -142,7 +145,7 @@ struct cxl_mem {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd);
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
enum cxl_opcode {
......@@ -253,12 +256,12 @@ struct cxl_mem_command {
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};
int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
size_t in_size, void *out, size_t out_size);
int cxl_mem_identify(struct cxl_mem *cxlm);
int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
int cxl_mem_create_range_info(struct cxl_mem *cxlm);
struct cxl_mem *cxl_mem_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
#endif /* __CXL_MEM_H__ */
......@@ -19,9 +19,9 @@ static struct workqueue_struct *cxl_pmem_wq;
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
static void clear_exclusive(void *cxlm)
static void clear_exclusive(void *cxlds)
{
clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}
static void unregister_nvdimm(void *nvdimm)
......@@ -34,7 +34,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
unsigned long flags = 0, cmd_mask = 0;
struct cxl_mem *cxlm = cxlmd->cxlm;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_nvdimm_bridge *cxl_nvb;
struct nvdimm *nvdimm;
int rc;
......@@ -49,8 +49,8 @@ static int cxl_nvdimm_probe(struct device *dev)
goto out;
}
set_exclusive_cxl_commands(cxlm, exclusive_cmds);
rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
set_exclusive_cxl_commands(cxlds, exclusive_cmds);
rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
if (rc)
goto out;
......@@ -80,7 +80,7 @@ static struct cxl_driver cxl_nvdimm_driver = {
.id = CXL_DEVICE_NVDIMM,
};
static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
......@@ -88,14 +88,14 @@ static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
return -EINVAL;
*cmd = (struct nd_cmd_get_config_size) {
.config_size = cxlm->lsa_size,
.max_xfer = cxlm->payload_size,
.config_size = cxlds->lsa_size,
.max_xfer = cxlds->payload_size,
};
return 0;
}
static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
......@@ -112,15 +112,14 @@ static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
.length = cmd->in_length,
};
rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
sizeof(get_lsa), cmd->out_buf,
cmd->in_length);
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
sizeof(get_lsa), cmd->out_buf, cmd->in_length);
cmd->status = 0;
return rc;
}
static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
......@@ -144,7 +143,7 @@ static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
};
memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
struct_size(set_lsa, data, cmd->in_length),
NULL, 0);
......@@ -164,18 +163,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
struct cxl_mem *cxlm = cxlmd->cxlm;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
return cxl_pmem_get_config_size(cxlm, buf, buf_len);
return cxl_pmem_get_config_size(cxlds, buf, buf_len);
case ND_CMD_GET_CONFIG_DATA:
return cxl_pmem_get_config_data(cxlm, buf, buf_len);
return cxl_pmem_get_config_data(cxlds, buf, buf_len);
case ND_CMD_SET_CONFIG_DATA:
return cxl_pmem_set_config_data(cxlm, buf, buf_len);
return cxl_pmem_set_config_data(cxlds, buf, buf_len);
default:
return -ENOTTY;
}
......@@ -266,14 +265,24 @@ static void cxl_nvb_update_state(struct work_struct *work)
put_device(&cxl_nvb->dev);
}
static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
/*
* Take a reference that the workqueue will drop if new work
* gets queued.
*/
get_device(&cxl_nvb->dev);
if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
put_device(&cxl_nvb->dev);
}
static void cxl_nvdimm_bridge_remove(struct device *dev)
{
struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
if (cxl_nvb->state == CXL_NVB_ONLINE)
cxl_nvb->state = CXL_NVB_OFFLINE;
if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
get_device(&cxl_nvb->dev);
cxl_nvdimm_bridge_state_work(cxl_nvb);
}
static int cxl_nvdimm_bridge_probe(struct device *dev)
......@@ -294,8 +303,7 @@ static int cxl_nvdimm_bridge_probe(struct device *dev)
}
cxl_nvb->state = CXL_NVB_ONLINE;
if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
get_device(&cxl_nvb->dev);
cxl_nvdimm_bridge_state_work(cxl_nvb);
return 0;
}
......@@ -307,6 +315,31 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
/*
* Return all bridges to the CXL_NVB_NEW state to invalidate any
* ->state_work referring to the now destroyed cxl_pmem_wq.
*/
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
struct cxl_nvdimm_bridge *cxl_nvb;
if (!is_cxl_nvdimm_bridge(dev))
return 0;
cxl_nvb = to_cxl_nvdimm_bridge(dev);
device_lock(dev);
cxl_nvb->state = CXL_NVB_NEW;
device_unlock(dev);
return 0;
}
static void destroy_cxl_pmem_wq(void)
{
destroy_workqueue(cxl_pmem_wq);
bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}
static __init int cxl_pmem_init(void)
{
int rc;
......@@ -332,7 +365,7 @@ static __init int cxl_pmem_init(void)
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
destroy_workqueue(cxl_pmem_wq);
destroy_cxl_pmem_wq();
return rc;
}
......@@ -340,7 +373,7 @@ static __exit void cxl_pmem_exit(void)
{
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
destroy_workqueue(cxl_pmem_wq);
destroy_cxl_pmem_wq();
}
MODULE_LICENSE("GPL v2");
......
......@@ -133,6 +133,7 @@ union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
struct acpi_prmt_module_header prmt;
struct acpi_cedt_header cedt;
};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
......@@ -140,6 +141,9 @@ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
const unsigned long end);
typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
void *arg, const unsigned long end);
/* Debugger support */
struct acpi_debugger_ops {
......@@ -216,6 +220,8 @@ static inline int acpi_debugger_notify_command_complete(void)
struct acpi_subtable_proc {
int id;
acpi_tbl_entry_handler handler;
acpi_tbl_entry_handler_arg handler_arg;
void *arg;
int count;
};
......@@ -232,17 +238,31 @@ int acpi_locate_initial_tables (void);
void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
#ifdef CONFIG_ACPI_TABLE_LIB
#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
#define __init_or_acpilib
#define __initdata_or_acpilib
#else
#define EXPORT_SYMBOL_ACPI_LIB(x)
#define __init_or_acpilib __init
#define __initdata_or_acpilib __initdata
#endif
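/*
 * Note: with CONFIG_ACPI_TABLE_LIB=y (selected by the CXL_ACPI Kconfig
 * change in this series) the sub-table parsing code and its static data
 * stay resident after boot, and acpi_table_parse_cedt() below is exported
 * in the ACPI symbol namespace for modular callers; otherwise everything
 * remains __init/__initdata as before.
 */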
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries);
int __init_or_acpilib acpi_table_parse_entries(char *id,
unsigned long table_size, int entry_id,
acpi_tbl_entry_handler handler, unsigned int max_entries);
int __init_or_acpilib acpi_table_parse_entries_array(char *id,
unsigned long table_size, struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
int __init_or_acpilib
acpi_table_parse_cedt(enum acpi_cedt_type id,
acpi_tbl_entry_handler_arg handler_arg, void *arg);
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
......
# SPDX-License-Identifier: GPL-2.0
ldflags-y += --wrap=acpi_table_parse_cedt
ldflags-y += --wrap=is_acpi_device_node
ldflags-y += --wrap=acpi_get_table
ldflags-y += --wrap=acpi_put_table
ldflags-y += --wrap=acpi_evaluate_integer
ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=pci_walk_bus
......
......@@ -182,6 +182,13 @@ static struct {
},
};
struct acpi_cedt_cfmws *mock_cfmws[4] = {
[0] = &mock_cedt.cfmws0.cfmws,
[1] = &mock_cedt.cfmws1.cfmws,
[2] = &mock_cedt.cfmws2.cfmws,
[3] = &mock_cedt.cfmws3.cfmws,
};
struct cxl_mock_res {
struct list_head list;
struct range range;
......@@ -232,12 +239,6 @@ static struct cxl_mock_res *alloc_mock_res(resource_size_t size)
static int populate_cedt(void)
{
struct acpi_cedt_cfmws *cfmws[4] = {
[0] = &mock_cedt.cfmws0.cfmws,
[1] = &mock_cedt.cfmws1.cfmws,
[2] = &mock_cedt.cfmws2.cfmws,
[3] = &mock_cedt.cfmws3.cfmws,
};
struct cxl_mock_res *res;
int i;
......@@ -257,8 +258,8 @@ static int populate_cedt(void)
chbs->length = size;
}
for (i = 0; i < ARRAY_SIZE(cfmws); i++) {
struct acpi_cedt_cfmws *window = cfmws[i];
for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
struct acpi_cedt_cfmws *window = mock_cfmws[i];
res = alloc_mock_res(window->window_size);
if (!res)
......@@ -269,21 +270,44 @@ static int populate_cedt(void)
return 0;
}
static acpi_status mock_acpi_get_table(char *signature, u32 instance,
struct acpi_table_header **out_table)
/*
* WARNING, this hack assumes the format of 'struct
* cxl_cfmws_context' and 'struct cxl_chbs_context' share the property that
* the first struct member is the device being probed by the cxl_acpi
* driver.
*/
struct cxl_cedt_context {
struct device *dev;
};
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
acpi_tbl_entry_handler_arg handler_arg,
void *arg)
{
if (instance < U32_MAX || strcmp(signature, ACPI_SIG_CEDT) != 0)
return acpi_get_table(signature, instance, out_table);
struct cxl_cedt_context *ctx = arg;
struct device *dev = ctx->dev;
union acpi_subtable_headers *h;
unsigned long end;
int i;
*out_table = (struct acpi_table_header *) &mock_cedt;
return AE_OK;
}
if (dev != &cxl_acpi->dev)
return acpi_table_parse_cedt(id, handler_arg, arg);
static void mock_acpi_put_table(struct acpi_table_header *table)
{
if (table == (struct acpi_table_header *) &mock_cedt)
return;
acpi_put_table(table);
if (id == ACPI_CEDT_TYPE_CHBS)
for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
end = (unsigned long)&mock_cedt.chbs[i + 1];
handler_arg(h, arg, end);
}
if (id == ACPI_CEDT_TYPE_CFMWS)
for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) {
h = (union acpi_subtable_headers *) mock_cfmws[i];
end = (unsigned long) h + mock_cfmws[i]->header.length;
handler_arg(h, arg, end);
}
return 0;
}
static bool is_mock_bridge(struct device *dev)
......@@ -388,8 +412,7 @@ static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_port = is_mock_port,
.is_mock_dev = is_mock_dev,
.mock_port = mock_cxl_root_port,
.acpi_get_table = mock_acpi_get_table,
.acpi_put_table = mock_acpi_put_table,
.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
.acpi_evaluate_integer = mock_acpi_evaluate_integer,
.acpi_pci_find_root = mock_acpi_pci_find_root,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
......@@ -574,3 +597,4 @@ static __exit void cxl_test_exit(void)
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
......@@ -28,8 +28,24 @@ static struct cxl_cel_entry mock_cel[] = {
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
.effect = cpu_to_le16(0),
},
};
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
u8 health_status;
u8 media_status;
u8 ext_status;
u8 life_used;
__le16 temperature;
__le32 dirty_shutdowns;
__le32 volatile_errors;
__le32 pmem_errors;
} __packed;
static struct {
struct cxl_mbox_get_supported_logs gsl;
struct cxl_gsl_entry entry;
......@@ -54,7 +70,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
return 0;
}
static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_log *gl = cmd->payload_in;
u32 offset = le32_to_cpu(gl->offset);
......@@ -64,7 +80,7 @@ static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
if (cmd->size_in < sizeof(*gl))
return -EINVAL;
if (length > cxlm->payload_size)
if (length > cxlds->payload_size)
return -EINVAL;
if (offset + length > sizeof(mock_cel))
return -EINVAL;
......@@ -78,9 +94,9 @@ static int mock_get_log(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
return 0;
}
static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
struct platform_device *pdev = to_platform_device(cxlm->dev);
struct platform_device *pdev = to_platform_device(cxlds->dev);
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
.lsa_size = cpu_to_le32(LSA_SIZE),
......@@ -120,10 +136,10 @@ static int mock_id(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
return 0;
}
static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
void *lsa = dev_get_drvdata(cxlm->dev);
void *lsa = dev_get_drvdata(cxlds->dev);
u32 offset, length;
if (sizeof(*get_lsa) > cmd->size_in)
......@@ -139,10 +155,10 @@ static int mock_get_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
return 0;
}
static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
void *lsa = dev_get_drvdata(cxlm->dev);
void *lsa = dev_get_drvdata(cxlds->dev);
u32 offset, length;
if (sizeof(*set_lsa) > cmd->size_in)
......@@ -156,9 +172,39 @@ static int mock_set_lsa(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
return 0;
}
static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
static int mock_health_info(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *cmd)
{
struct device *dev = cxlm->dev;
struct cxl_mbox_health_info health_info = {
/* set flags for maint needed, perf degraded, hw replacement */
.health_status = 0x7,
/* set media status to "All Data Lost" */
.media_status = 0x3,
/*
* set ext_status flags for:
* ext_life_used: normal,
* ext_temperature: critical,
* ext_corrected_volatile: warning,
* ext_corrected_persistent: normal,
*/
.ext_status = 0x18,
.life_used = 15,
.temperature = cpu_to_le16(25),
.dirty_shutdowns = cpu_to_le32(10),
.volatile_errors = cpu_to_le32(20),
.pmem_errors = cpu_to_le32(30),
};
if (cmd->size_out < sizeof(health_info))
return -EINVAL;
memcpy(cmd->payload_out, &health_info, sizeof(health_info));
return 0;
}
static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
struct device *dev = cxlds->dev;
int rc = -EIO;
switch (cmd->opcode) {
......@@ -166,16 +212,19 @@ static int cxl_mock_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
rc = mock_gsl(cmd);
break;
case CXL_MBOX_OP_GET_LOG:
rc = mock_get_log(cxlm, cmd);
rc = mock_get_log(cxlds, cmd);
break;
case CXL_MBOX_OP_IDENTIFY:
rc = mock_id(cxlm, cmd);
rc = mock_id(cxlds, cmd);
break;
case CXL_MBOX_OP_GET_LSA:
rc = mock_get_lsa(cxlm, cmd);
rc = mock_get_lsa(cxlds, cmd);
break;
case CXL_MBOX_OP_SET_LSA:
rc = mock_set_lsa(cxlm, cmd);
rc = mock_set_lsa(cxlds, cmd);
break;
case CXL_MBOX_OP_GET_HEALTH_INFO:
rc = mock_health_info(cxlds, cmd);
break;
default:
break;
......@@ -196,7 +245,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cxl_memdev *cxlmd;
struct cxl_mem *cxlm;
struct cxl_dev_state *cxlds;
void *lsa;
int rc;
......@@ -208,30 +257,30 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
return rc;
dev_set_drvdata(dev, lsa);
cxlm = cxl_mem_create(dev);
if (IS_ERR(cxlm))
return PTR_ERR(cxlm);
cxlds = cxl_dev_state_create(dev);
if (IS_ERR(cxlds))
return PTR_ERR(cxlds);
cxlm->mbox_send = cxl_mock_mbox_send;
cxlm->payload_size = SZ_4K;
cxlds->mbox_send = cxl_mock_mbox_send;
cxlds->payload_size = SZ_4K;
rc = cxl_mem_enumerate_cmds(cxlm);
rc = cxl_enumerate_cmds(cxlds);
if (rc)
return rc;
rc = cxl_mem_identify(cxlm);
rc = cxl_dev_state_identify(cxlds);
if (rc)
return rc;
rc = cxl_mem_create_range_info(cxlm);
rc = cxl_mem_create_range_info(cxlds);
if (rc)
return rc;
cxlmd = devm_cxl_add_memdev(cxlm);
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(dev, cxlmd);
return 0;
......
......@@ -58,36 +58,23 @@ bool __wrap_is_acpi_device_node(const struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL(__wrap_is_acpi_device_node);
acpi_status __wrap_acpi_get_table(char *signature, u32 instance,
struct acpi_table_header **out_table)
int __wrap_acpi_table_parse_cedt(enum acpi_cedt_type id,
acpi_tbl_entry_handler_arg handler_arg,
void *arg)
{
int index;
int index, rc;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
acpi_status status;
if (ops)
status = ops->acpi_get_table(signature, instance, out_table);
rc = ops->acpi_table_parse_cedt(id, handler_arg, arg);
else
status = acpi_get_table(signature, instance, out_table);
rc = acpi_table_parse_cedt(id, handler_arg, arg);
put_cxl_mock_ops(index);
return status;
}
EXPORT_SYMBOL(__wrap_acpi_get_table);
void __wrap_acpi_put_table(struct acpi_table_header *table)
{
int index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops)
ops->acpi_put_table(table);
else
acpi_put_table(table);
put_cxl_mock_ops(index);
return rc;
}
EXPORT_SYMBOL(__wrap_acpi_put_table);
EXPORT_SYMBOL_NS_GPL(__wrap_acpi_table_parse_cedt, ACPI);
acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
......@@ -169,3 +156,4 @@ __wrap_nvdimm_bus_register(struct device *dev,
EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
......@@ -6,9 +6,9 @@
struct cxl_mock_ops {
struct list_head list;
bool (*is_mock_adev)(struct acpi_device *dev);
acpi_status (*acpi_get_table)(char *signature, u32 instance,
struct acpi_table_header **out_table);
void (*acpi_put_table)(struct acpi_table_header *table);
int (*acpi_table_parse_cedt)(enum acpi_cedt_type id,
acpi_tbl_entry_handler_arg handler_arg,
void *arg);
bool (*is_mock_bridge)(struct device *dev);
acpi_status (*acpi_evaluate_integer)(acpi_handle handle,
acpi_string pathname,
......