Commit 59f8d151 authored by Dan Williams

cxl/mbox: Move mailbox related driver state to its own data structure

'struct cxl_dev_state' makes too many assumptions about the capabilities
of a CXL device. In particular it assumes a CXL device has a mailbox and
all of the infrastructure and state that comes along with that.

In preparation for supporting accelerator / Type-2 devices that may not
have a mailbox, and to keep the core context structure minimal, move
mailbox functionality into a new 'struct cxl_memdev_state' that is a
super-set of 'struct cxl_dev_state'.

This reorganization allows CXL devices that support HDM decoder mapping,
but not the other general-expander / Type-3 capabilities, to enable only
that subset without the rest of the mailbox infrastructure coming along
for the ride.
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/168679260240.3436160.15520641540463704524.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 3fe7feb0
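
For orientation before reading the hunks below, here is a minimal sketch of the
relationship this commit introduces, abridged from the cxlmem.h changes (field
lists are trimmed and not exhaustive): 'struct cxl_memdev_state' embeds
'struct cxl_dev_state' as its 'cxlds' member, and code that only holds the core
pointer recovers the memdev state with container_of().

    /* Abridged sketch of the new split; see the full cxlmem.h hunk below. */
    struct cxl_dev_state {
            struct device *dev;                 /* minimal core context */
            struct resource dpa_res;
            resource_size_t component_reg_phys;
            /* ... no mailbox state here ... */
    };

    struct cxl_memdev_state {
            struct cxl_dev_state cxlds;         /* embedded core state */
            size_t payload_size;                /* mailbox payload capacity */
            struct mutex mbox_mutex;
            int (*mbox_send)(struct cxl_memdev_state *mds,
                             struct cxl_mbox_cmd *cmd);
            /* ... */
    };

    /* Mailbox users convert a core pointer back to the memdev super-set */
    static inline struct cxl_memdev_state *
    to_cxl_memdev_state(struct cxl_dev_state *cxlds)
    {
            return container_of(cxlds, struct cxl_memdev_state, cxlds);
    }

Under this split, a driver for a device without a mailbox would only need the
embedded 'struct cxl_dev_state' portion, while the Type-3 memdev driver
continues to allocate the full 'struct cxl_memdev_state'.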
This diff is collapsed.
@@ -39,8 +39,9 @@ static ssize_t firmware_version_show(struct device *dev,
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 
-	return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
+	return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
 }
 static DEVICE_ATTR_RO(firmware_version);
@@ -49,8 +50,9 @@ static ssize_t payload_max_show(struct device *dev,
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 
-	return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
+	return sysfs_emit(buf, "%zu\n", mds->payload_size);
 }
 static DEVICE_ATTR_RO(payload_max);
@@ -59,8 +61,9 @@ static ssize_t label_storage_size_show(struct device *dev,
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 
-	return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
+	return sysfs_emit(buf, "%zu\n", mds->lsa_size);
 }
 static DEVICE_ATTR_RO(label_storage_size);
@@ -231,7 +234,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
 
 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 {
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_inject_poison inject;
 	struct cxl_poison_record record;
 	struct cxl_mbox_cmd mbox_cmd;
@@ -255,13 +258,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.size_in = sizeof(inject),
 		.payload_in = &inject,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc)
 		goto out;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
-		dev_warn_once(cxlds->dev,
+		dev_warn_once(mds->cxlds.dev,
 			      "poison inject dpa:%#llx region: %s\n", dpa,
 			      dev_name(&cxlr->dev));
@@ -279,7 +282,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
 
 int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 {
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_clear_poison clear;
 	struct cxl_poison_record record;
 	struct cxl_mbox_cmd mbox_cmd;
@@ -312,14 +315,15 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.payload_in = &clear,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc)
 		goto out;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
-		dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
-			      dpa, dev_name(&cxlr->dev));
+		dev_warn_once(mds->cxlds.dev,
+			      "poison clear dpa:%#llx region: %s\n", dpa,
+			      dev_name(&cxlr->dev));
 
 	record = (struct cxl_poison_record) {
 		.address = cpu_to_le64(dpa),
@@ -397,17 +401,18 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
 
 /**
  * set_exclusive_cxl_commands() - atomically disable user cxl commands
- * @cxlds: The device state to operate on
+ * @mds: The device state to operate on
  * @cmds: bitmap of commands to mark exclusive
  *
  * Grab the cxl_memdev_rwsem in write mode to flush in-flight
  * invocations of the ioctl path and then disable future execution of
  * commands with the command ids set in @cmds.
  */
-void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+				unsigned long *cmds)
 {
 	down_write(&cxl_memdev_rwsem);
-	bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+	bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
 		  CXL_MEM_COMMAND_ID_MAX);
 	up_write(&cxl_memdev_rwsem);
 }
@@ -415,13 +420,14 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
 
 /**
  * clear_exclusive_cxl_commands() - atomically enable user cxl commands
- * @cxlds: The device state to modify
+ * @mds: The device state to modify
  * @cmds: bitmap of commands to mark available for userspace
  */
-void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
+void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+				  unsigned long *cmds)
 {
 	down_write(&cxl_memdev_rwsem);
-	bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
+	bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds,
 		      CXL_MEM_COMMAND_ID_MAX);
 	up_write(&cxl_memdev_rwsem);
 }
@@ -267,6 +267,34 @@ struct cxl_poison_state {
  * @cxl_dvsec: Offset to the PCIe device DVSEC
  * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
  * @media_ready: Indicate whether the device media is usable
+ * @dpa_res: Overall DPA resource tree for the device
+ * @pmem_res: Active Persistent memory capacity configuration
+ * @ram_res: Active Volatile memory capacity configuration
+ * @component_reg_phys: register base of component registers
+ * @serial: PCIe Device Serial Number
+ */
+struct cxl_dev_state {
+	struct device *dev;
+	struct cxl_memdev *cxlmd;
+	struct cxl_regs regs;
+	int cxl_dvsec;
+	bool rcd;
+	bool media_ready;
+	struct resource dpa_res;
+	struct resource pmem_res;
+	struct resource ram_res;
+	resource_size_t component_reg_phys;
+	u64 serial;
+};
+
+/**
+ * struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data
+ *
+ * CXL 8.1.12.1 PCI Header - Class Code Register Memory Device defines
+ * common memory device functionality like the presence of a mailbox and
+ * the functionality related to that like Identify Memory Device and Get
+ * Partition Info
+ * @cxlds: Core driver state common across Type-2 and Type-3 devices
  * @payload_size: Size of space for payload
  *                (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
  * @lsa_size: Size of Label Storage Area
@@ -275,9 +303,6 @@ struct cxl_poison_state {
  * @firmware_version: Firmware version for the memory device.
  * @enabled_cmds: Hardware commands found enabled in CEL.
  * @exclusive_cmds: Commands that are kernel-internal only
- * @dpa_res: Overall DPA resource tree for the device
- * @pmem_res: Active Persistent memory capacity configuration
- * @ram_res: Active Volatile memory capacity configuration
  * @total_bytes: sum of all possible capacities
  * @volatile_only_bytes: hard volatile capacity
  * @persistent_only_bytes: hard persistent capacity
@@ -286,53 +311,41 @@ struct cxl_poison_state {
  * @active_persistent_bytes: sum of hard + soft persistent
  * @next_volatile_bytes: volatile capacity change pending device reset
  * @next_persistent_bytes: persistent capacity change pending device reset
- * @component_reg_phys: register base of component registers
- * @serial: PCIe Device Serial Number
  * @event: event log driver state
  * @poison: poison driver state info
  * @mbox_send: @dev specific transport for transmitting mailbox commands
  *
- * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
+ * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
  */
-struct cxl_dev_state {
-	struct device *dev;
-	struct cxl_memdev *cxlmd;
-	struct cxl_regs regs;
-	int cxl_dvsec;
-	bool rcd;
-	bool media_ready;
+struct cxl_memdev_state {
+	struct cxl_dev_state cxlds;
 	size_t payload_size;
 	size_t lsa_size;
 	struct mutex mbox_mutex; /* Protects device mailbox and firmware */
 	char firmware_version[0x10];
 	DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
 	DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
-	struct resource dpa_res;
-	struct resource pmem_res;
-	struct resource ram_res;
 	u64 total_bytes;
 	u64 volatile_only_bytes;
 	u64 persistent_only_bytes;
 	u64 partition_align_bytes;
 	u64 active_volatile_bytes;
 	u64 active_persistent_bytes;
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
-	resource_size_t component_reg_phys;
-	u64 serial;
 	struct cxl_event_state event;
 	struct cxl_poison_state poison;
 
-	int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
+	int (*mbox_send)(struct cxl_memdev_state *mds,
+			 struct cxl_mbox_cmd *cmd);
 };
 
+static inline struct cxl_memdev_state *
+to_cxl_memdev_state(struct cxl_dev_state *cxlds)
+{
+	return container_of(cxlds, struct cxl_memdev_state, cxlds);
+}
+
 enum cxl_opcode {
 	CXL_MBOX_OP_INVALID = 0x0000,
 	CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
@@ -691,18 +704,20 @@ enum {
 	CXL_PMEM_SEC_PASS_USER,
 };
 
-int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
+int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
 			  struct cxl_mbox_cmd *cmd);
-int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+int cxl_dev_state_identify(struct cxl_memdev_state *mds);
 int cxl_await_media_ready(struct cxl_dev_state *cxlds);
-int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
-struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
-void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
-void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
-void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status);
-int cxl_set_timestamp(struct cxl_dev_state *cxlds);
-int cxl_poison_state_init(struct cxl_dev_state *cxlds);
+int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
+int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
+struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+				unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+				  unsigned long *cmds);
+void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
+int cxl_set_timestamp(struct cxl_memdev_state *mds);
+int cxl_poison_state_init(struct cxl_memdev_state *mds);
 int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
 		       struct cxl_region *cxlr);
 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
@@ -117,6 +117,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
 static int cxl_mem_probe(struct device *dev)
 {
 	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct device *endpoint_parent;
 	struct cxl_port *parent_port;
@@ -141,10 +142,10 @@ static int cxl_mem_probe(struct device *dev)
 	dentry = cxl_debugfs_create_dir(dev_name(dev));
 	debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
-	if (test_bit(CXL_POISON_ENABLED_INJECT, cxlds->poison.enabled_cmds))
+	if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds))
 		debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
 				    &cxl_poison_inject_fops);
-	if (test_bit(CXL_POISON_ENABLED_CLEAR, cxlds->poison.enabled_cmds))
+	if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds))
 		debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
 				    &cxl_poison_clear_fops);
@@ -227,9 +228,12 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
 {
 	if (a == &dev_attr_trigger_poison_list.attr) {
 		struct device *dev = kobj_to_dev(kobj);
+		struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+		struct cxl_memdev_state *mds =
+			to_cxl_memdev_state(cxlmd->cxlds);
 
 		if (!test_bit(CXL_POISON_ENABLED_LIST,
-			      to_cxl_memdev(dev)->cxlds->poison.enabled_cmds))
+			      mds->poison.enabled_cmds))
 			return 0;
 	}
 	return a->mode;
This diff is collapsed.
@@ -15,9 +15,9 @@ extern const struct nvdimm_security_ops *cxl_security_ops;
 
 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 
-static void clear_exclusive(void *cxlds)
+static void clear_exclusive(void *mds)
 {
-	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
+	clear_exclusive_cxl_commands(mds, exclusive_cmds);
 }
 
 static void unregister_nvdimm(void *nvdimm)
@@ -65,13 +65,13 @@ static int cxl_nvdimm_probe(struct device *dev)
 	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
 	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	unsigned long flags = 0, cmd_mask = 0;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct nvdimm *nvdimm;
 	int rc;
 
-	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
-	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
+	set_exclusive_cxl_commands(mds, exclusive_cmds);
+	rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
 	if (rc)
 		return rc;
@@ -100,22 +100,23 @@ static struct cxl_driver cxl_nvdimm_driver = {
 	},
 };
 
-static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
+static int cxl_pmem_get_config_size(struct cxl_memdev_state *mds,
 				    struct nd_cmd_get_config_size *cmd,
 				    unsigned int buf_len)
 {
 	if (sizeof(*cmd) > buf_len)
 		return -EINVAL;
 
-	*cmd = (struct nd_cmd_get_config_size) {
-		.config_size = cxlds->lsa_size,
-		.max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
+	*cmd = (struct nd_cmd_get_config_size){
+		.config_size = mds->lsa_size,
+		.max_xfer =
+			mds->payload_size - sizeof(struct cxl_mbox_set_lsa),
 	};
 
 	return 0;
 }
 
-static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
+static int cxl_pmem_get_config_data(struct cxl_memdev_state *mds,
 				    struct nd_cmd_get_config_data_hdr *cmd,
 				    unsigned int buf_len)
 {
@@ -140,13 +141,13 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
 		.payload_out = cmd->out_buf,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	cmd->status = 0;
 
 	return rc;
 }
 
-static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
+static int cxl_pmem_set_config_data(struct cxl_memdev_state *mds,
 				    struct nd_cmd_set_config_hdr *cmd,
 				    unsigned int buf_len)
 {
@@ -176,7 +177,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
 		.size_in = struct_size(set_lsa, data, cmd->in_length),
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 
 	/*
 	 * Set "firmware" status (4-packed bytes at the end of the input
@@ -194,18 +195,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 
 	if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
 
 	switch (cmd) {
 	case ND_CMD_GET_CONFIG_SIZE:
-		return cxl_pmem_get_config_size(cxlds, buf, buf_len);
+		return cxl_pmem_get_config_size(mds, buf, buf_len);
 	case ND_CMD_GET_CONFIG_DATA:
-		return cxl_pmem_get_config_data(cxlds, buf, buf_len);
+		return cxl_pmem_get_config_data(mds, buf, buf_len);
 	case ND_CMD_SET_CONFIG_DATA:
-		return cxl_pmem_set_config_data(cxlds, buf, buf_len);
+		return cxl_pmem_set_config_data(mds, buf, buf_len);
 	default:
 		return -ENOTTY;
 	}
@@ -14,7 +14,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
 {
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	unsigned long security_flags = 0;
 	struct cxl_get_security_output {
 		__le32 flags;
@@ -29,7 +29,7 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm,
 		.payload_out = &out,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc < 0)
 		return 0;
@@ -67,7 +67,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
 {
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_cmd mbox_cmd;
 	struct cxl_set_pass set_pass;
@@ -84,7 +84,7 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm,
 		.payload_in = &set_pass,
 	};
 
-	return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	return cxl_internal_send_cmd(mds, &mbox_cmd);
 }
 
 static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -93,7 +93,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
 {
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_disable_pass dis_pass;
 	struct cxl_mbox_cmd mbox_cmd;
@@ -109,7 +109,7 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm,
 		.payload_in = &dis_pass,
 	};
 
-	return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	return cxl_internal_send_cmd(mds, &mbox_cmd);
 }
 
 static int cxl_pmem_security_disable(struct nvdimm *nvdimm,
@@ -128,12 +128,12 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm)
 {
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_cmd mbox_cmd = {
 		.opcode = CXL_MBOX_OP_FREEZE_SECURITY,
 	};
 
-	return cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	return cxl_internal_send_cmd(mds, &mbox_cmd);
 }
 
 static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
@@ -141,7 +141,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
 {
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	u8 pass[NVDIMM_PASSPHRASE_LEN];
 	struct cxl_mbox_cmd mbox_cmd;
 	int rc;
@@ -153,7 +153,7 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm,
 		.payload_in = pass,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc < 0)
 		return rc;
@@ -166,7 +166,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
 {
 	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 	struct cxl_mbox_cmd mbox_cmd;
 	struct cxl_pass_erase erase;
 	int rc;
@@ -182,7 +182,7 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm,
 		.payload_in = &erase,
 	};
 
-	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc < 0)
 		return rc;
@@ -102,7 +102,7 @@ struct mock_event_log {
 };
 
 struct mock_event_store {
-	struct cxl_dev_state *cxlds;
+	struct cxl_memdev_state *mds;
 	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
 	u32 ev_status;
 };
@@ -291,7 +291,7 @@ static void cxl_mock_event_trigger(struct device *dev)
 			event_reset_log(log);
 	}
 
-	cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
+	cxl_mem_get_event_records(mes->mds, mes->ev_status);
 }
 
 struct cxl_event_record_raw maint_needed = {
@@ -451,7 +451,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
 	return 0;
 }
 
-static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
 {
 	struct cxl_mbox_get_log *gl = cmd->payload_in;
 	u32 offset = le32_to_cpu(gl->offset);
@@ -461,7 +461,7 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 	if (cmd->size_in < sizeof(*gl))
 		return -EINVAL;
-	if (length > cxlds->payload_size)
+	if (length > mds->payload_size)
 		return -EINVAL;
 	if (offset + length > sizeof(mock_cel))
 		return -EINVAL;
@@ -1105,8 +1105,10 @@ static struct attribute *cxl_mock_mem_core_attrs[] = {
 };
 ATTRIBUTE_GROUPS(cxl_mock_mem_core);
 
-static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+			      struct cxl_mbox_cmd *cmd)
 {
+	struct cxl_dev_state *cxlds = &mds->cxlds;
 	struct device *dev = cxlds->dev;
 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
 	int rc = -EIO;
@@ -1119,7 +1121,7 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
 		rc = mock_gsl(cmd);
 		break;
 	case CXL_MBOX_OP_GET_LOG:
-		rc = mock_get_log(cxlds, cmd);
+		rc = mock_get_log(mds, cmd);
 		break;
 	case CXL_MBOX_OP_IDENTIFY:
 		if (cxlds->rcd)
@@ -1207,6 +1209,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct cxl_memdev *cxlmd;
+	struct cxl_memdev_state *mds;
 	struct cxl_dev_state *cxlds;
 	struct cxl_mockmem_data *mdata;
 	int rc;
@@ -1223,48 +1226,50 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
 	if (rc)
 		return rc;
 
-	cxlds = cxl_dev_state_create(dev);
-	if (IS_ERR(cxlds))
-		return PTR_ERR(cxlds);
+	mds = cxl_memdev_state_create(dev);
+	if (IS_ERR(mds))
+		return PTR_ERR(mds);
+
+	mds->mbox_send = cxl_mock_mbox_send;
+	mds->payload_size = SZ_4K;
+	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
 
+	cxlds = &mds->cxlds;
 	cxlds->serial = pdev->id;
-	cxlds->mbox_send = cxl_mock_mbox_send;
-	cxlds->payload_size = SZ_4K;
-	cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
 	if (is_rcd(pdev)) {
 		cxlds->rcd = true;
 		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
 	}
 
-	rc = cxl_enumerate_cmds(cxlds);
+	rc = cxl_enumerate_cmds(mds);
 	if (rc)
 		return rc;
 
-	rc = cxl_poison_state_init(cxlds);
+	rc = cxl_poison_state_init(mds);
 	if (rc)
 		return rc;
 
-	rc = cxl_set_timestamp(cxlds);
+	rc = cxl_set_timestamp(mds);
 	if (rc)
 		return rc;
 
 	cxlds->media_ready = true;
-	rc = cxl_dev_state_identify(cxlds);
+	rc = cxl_dev_state_identify(mds);
 	if (rc)
 		return rc;
 
-	rc = cxl_mem_create_range_info(cxlds);
+	rc = cxl_mem_create_range_info(mds);
 	if (rc)
 		return rc;
 
-	mdata->mes.cxlds = cxlds;
+	mdata->mes.mds = mds;
 	cxl_mock_add_event_logs(&mdata->mes);
 
 	cxlmd = devm_cxl_add_memdev(cxlds);
 	if (IS_ERR(cxlmd))
 		return PTR_ERR(cxlmd);
 
-	cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
 
 	return 0;
 }