Commit 70868a18 authored by Linus Torvalds

Merge tag 'cxl-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull CXL (Compute Express Link) updates from Dan Williams:

 - Fix detection of CXL host bridges to filter out disabled ACPI0016
   devices in the ACPI DSDT.

 - Fix kernel lockdown integration to disable raw commands when raw PCI
   access is disabled.

 - Fix a broken debug message.

 - Add support for "Get Partition Info", i.e. enumerate the split
   between volatile and persistent capacity on bi-modal CXL memory
   expanders.

 - Re-factor the core by subject area. This is a work in progress.

 - Prepare libnvdimm to understand CXL labels in addition to EFI labels.
   This is a work in progress.

* tag 'cxl-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (25 commits)
  cxl/registers: Fix Documentation warning
  cxl/pmem: Fix Documentation warning
  cxl/uapi: Fix defined but not used warnings
  cxl/pci: Fix debug message in cxl_probe_regs()
  cxl/pci: Fix lockdown level
  cxl/acpi: Do not add DSDT disabled ACPI0016 host bridge ports
  libnvdimm/labels: Add claim class helpers
  libnvdimm/labels: Add type-guid helpers
  libnvdimm/labels: Add blk special cases for nlabel and position helpers
  libnvdimm/labels: Add blk isetcookie set / validation helpers
  libnvdimm/labels: Add a checksum calculation helper
  libnvdimm/labels: Introduce label setter helpers
  libnvdimm/labels: Add isetcookie validation helper
  libnvdimm/labels: Introduce getters for namespace label fields
  cxl/mem: Adjust ram/pmem range to represent DPA ranges
  cxl/mem: Account for partitionable space in ram/pmem ranges
  cxl/pci: Store memory capacity values
  cxl/pci: Simplify register setup
  cxl/pci: Ignore unknown register block types
  cxl/core: Move memdev management to core
  ...
parents 2e5fd489 2b922a9d
@@ -36,9 +36,15 @@ CXL Core
 .. kernel-doc:: drivers/cxl/cxl.h
    :internal:
 
-.. kernel-doc:: drivers/cxl/core.c
+.. kernel-doc:: drivers/cxl/core/bus.c
    :doc: cxl core
 
+.. kernel-doc:: drivers/cxl/core/pmem.c
+   :doc: cxl pmem
+
+.. kernel-doc:: drivers/cxl/core/regs.c
+   :doc: cxl registers
+
 External Interfaces
 ===================
...
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_BUS) += core/
 obj-$(CONFIG_CXL_MEM) += cxl_pci.o
 obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
 obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
 
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
-cxl_core-y := core.o
 cxl_pci-y := pci.o
 cxl_acpi-y := acpi.o
 cxl_pmem-y := pmem.o
@@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
 {
 	struct acpi_device *adev = to_acpi_device(dev);
 
+	if (!acpi_pci_find_root(adev->handle))
+		return NULL;
+
 	if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
 		return adev;
 	return NULL;
@@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 	if (!bridge)
 		return 0;
 
-	pci_root = acpi_pci_find_root(bridge->handle);
-	if (!pci_root)
-		return -ENXIO;
-
 	dport = find_dport_by_dev(root_port, match);
 	if (!dport) {
 		dev_dbg(host, "host bridge expected and not found\n");
@@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 		return PTR_ERR(port);
 	dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
 
+	/*
+	 * Note that this lookup already succeeded in
+	 * to_cxl_host_bridge(), so no need to check for failure here
+	 */
+	pci_root = acpi_pci_find_root(bridge->handle);
 	ctx = (struct cxl_walk_context){
 		.dev = host,
 		.root = pci_root->bus,
...
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CXL_BUS) += cxl_core.o
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
cxl_core-y := bus.o
cxl_core-y += pmem.o
cxl_core-y += regs.o
cxl_core-y += memdev.o
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */
#ifndef __CXL_CORE_H__
#define __CXL_CORE_H__
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
extern struct attribute_group cxl_base_attribute_group;
static inline void unregister_cxl_dev(void *dev)
{
device_unregister(dev);
}
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
#endif /* __CXL_CORE_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "core.h"
/*
* An entire PCI topology full of devices should be enough for any
* config
*/
#define CXL_MEM_MAX_DEVS 65536
static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
static void cxl_memdev_release(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
ida_free(&cxl_memdev_ida, cxlmd->id);
kfree(cxlmd);
}
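/* place memdev character devices under a "cxl/" subdirectory of /dev */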
static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
kgid_t *gid)
{
return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
static ssize_t payload_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
static ssize_t label_storage_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->ram_range);
return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_ram_size =
__ATTR(size, 0444, ram_size_show, NULL);
static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->pmem_range);
return sysfs_emit(buf, "%#llx\n", len);
}
static struct device_attribute dev_attr_pmem_size =
__ATTR(size, 0444, pmem_size_show, NULL);
static struct attribute *cxl_memdev_attributes[] = {
&dev_attr_firmware_version.attr,
&dev_attr_payload_max.attr,
&dev_attr_label_storage_size.attr,
NULL,
};
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
NULL,
};
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
NULL,
};
static struct attribute_group cxl_memdev_attribute_group = {
.attrs = cxl_memdev_attributes,
};
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
};
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
};
static const struct attribute_group *cxl_memdev_attribute_groups[] = {
&cxl_memdev_attribute_group,
&cxl_memdev_ram_attribute_group,
&cxl_memdev_pmem_attribute_group,
NULL,
};
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
.devnode = cxl_memdev_devnode,
.groups = cxl_memdev_attribute_groups,
};
static void cxl_memdev_unregister(void *_cxlmd)
{
struct cxl_memdev *cxlmd = _cxlmd;
struct device *dev = &cxlmd->dev;
struct cdev *cdev = &cxlmd->cdev;
const struct cdevm_file_operations *cdevm_fops;
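	/*
	 * Recover the cdevm container from the fops pointer that
	 * cdev_init() registered, so ioctl operations can be quiesced
	 * before cdev_device_del() tears the device down.
	 */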
cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
cdevm_fops->shutdown(dev);
cdev_device_del(&cxlmd->cdev, dev);
put_device(dev);
}
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
const struct file_operations *fops)
{
struct pci_dev *pdev = cxlm->pdev;
struct cxl_memdev *cxlmd;
struct device *dev;
struct cdev *cdev;
int rc;
cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
if (!cxlmd)
return ERR_PTR(-ENOMEM);
rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
if (rc < 0)
goto err;
cxlmd->id = rc;
dev = &cxlmd->dev;
device_initialize(dev);
dev->parent = &pdev->dev;
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
device_set_pm_not_required(dev);
cdev = &cxlmd->cdev;
cdev_init(cdev, fops);
return cxlmd;
err:
kfree(cxlmd);
return ERR_PTR(rc);
}
struct cxl_memdev *
devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
const struct cdevm_file_operations *cdevm_fops)
{
struct cxl_memdev *cxlmd;
struct device *dev;
struct cdev *cdev;
int rc;
cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
if (IS_ERR(cxlmd))
return cxlmd;
dev = &cxlmd->dev;
rc = dev_set_name(dev, "mem%d", cxlmd->id);
if (rc)
goto err;
/*
* Activate ioctl operations, no cxl_memdev_rwsem manipulation
* needed as this is ordered with cdev_add() publishing the device.
*/
cxlmd->cxlm = cxlm;
cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
goto err;
rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
if (rc)
return ERR_PTR(rc);
return cxlmd;
err:
/*
* The cdev was briefly live, shutdown any ioctl operations that
* saw that state.
*/
cdevm_fops->shutdown(dev);
put_device(dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
__init int cxl_memdev_init(void)
{
dev_t devt;
int rc;
rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
if (rc)
return rc;
cxl_mem_major = MAJOR(devt);
return 0;
}
void cxl_memdev_exit(void)
{
unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}
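As an aside, a minimal sketch of how a memdev producer might wire into this interface; cxl_mem_create() is a hypothetical helper, and a real ->shutdown() must synchronize with in-flight fops per the cdevm_file_operations contract:

static void example_shutdown(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	/* real implementations must serialize against fops users */
	cxlmd->cxlm = NULL;
}

static const struct cdevm_file_operations example_fops = {
	.fops = { .owner = THIS_MODULE },
	.shutdown = example_shutdown,
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_mem *cxlm = cxl_mem_create(pdev); /* assumed helper */

	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);
	return PTR_ERR_OR_ZERO(devm_cxl_add_memdev(&pdev->dev, cxlm,
						   &example_fops));
}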
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */
#include <linux/device.h>
#include <linux/slab.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
/**
* DOC: cxl pmem
*
* The core CXL PMEM infrastructure supports persistent memory
* provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
* 'bridge' device is added at the root of a CXL device topology if
* platform firmware advertises at least one persistent memory capable
* CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
* device. Then for each cxl_memdev in the CXL device topology a bridge
* device is added to host a LIBNVDIMM dimm object. When these bridges
* are registered native LIBNVDIMM uapis are translated to CXL
* operations, for example, namespace label access commands.
*/
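/*
 * Illustrative mapping (assumed shape, not part of this commit):
 *
 *   root cxl_port -> "nvdimm-bridge" (cxl_nvdimm_bridge) ... LIBNVDIMM bus
 *   memN cxl_memdev -> "pmemN" (cxl_nvdimm) ............... LIBNVDIMM dimm
 */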
static void cxl_nvdimm_bridge_release(struct device *dev)
{
struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
kfree(cxl_nvb);
}
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
&cxl_base_attribute_group,
NULL,
};
const struct device_type cxl_nvdimm_bridge_type = {
.name = "cxl_nvdimm_bridge",
.release = cxl_nvdimm_bridge_release,
.groups = cxl_nvdimm_bridge_attribute_groups,
};
struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
{
if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
"not a cxl_nvdimm_bridge device\n"))
return NULL;
return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
struct cxl_nvdimm_bridge *cxl_nvb;
struct device *dev;
cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
if (!cxl_nvb)
return ERR_PTR(-ENOMEM);
dev = &cxl_nvb->dev;
cxl_nvb->port = port;
cxl_nvb->state = CXL_NVB_NEW;
device_initialize(dev);
device_set_pm_not_required(dev);
dev->parent = &port->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_nvdimm_bridge_type;
return cxl_nvb;
}
static void unregister_nvb(void *_cxl_nvb)
{
struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
bool flush;
/*
* If the bridge was ever activated then there might be in-flight state
* work to flush. Once the state has been changed to 'dead' then no new
* work can be queued by user-triggered bind.
*/
device_lock(&cxl_nvb->dev);
flush = cxl_nvb->state != CXL_NVB_NEW;
cxl_nvb->state = CXL_NVB_DEAD;
device_unlock(&cxl_nvb->dev);
/*
* Even though the device core will trigger device_release_driver()
* before the unregister, it does not know about the fact that
* cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
* release now and flush it before tearing down the nvdimm device
* hierarchy.
*/
device_release_driver(&cxl_nvb->dev);
if (flush)
flush_work(&cxl_nvb->state_work);
device_unregister(&cxl_nvb->dev);
}
/**
* devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
* @host: platform firmware root device
* @port: CXL port at the root of a CXL topology
*
* Return: bridge device that can host cxl_nvdimm objects
*/
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port)
{
struct cxl_nvdimm_bridge *cxl_nvb;
struct device *dev;
int rc;
if (!IS_ENABLED(CONFIG_CXL_PMEM))
return ERR_PTR(-ENXIO);
cxl_nvb = cxl_nvdimm_bridge_alloc(port);
if (IS_ERR(cxl_nvb))
return cxl_nvb;
dev = &cxl_nvb->dev;
rc = dev_set_name(dev, "nvdimm-bridge");
if (rc)
goto err;
rc = device_add(dev);
if (rc)
goto err;
rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
if (rc)
return ERR_PTR(rc);
return cxl_nvb;
err:
put_device(dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
static void cxl_nvdimm_release(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
kfree(cxl_nvd);
}
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
&cxl_base_attribute_group,
NULL,
};
const struct device_type cxl_nvdimm_type = {
.name = "cxl_nvdimm",
.release = cxl_nvdimm_release,
.groups = cxl_nvdimm_attribute_groups,
};
bool is_cxl_nvdimm(struct device *dev)
{
return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
"not a cxl_nvdimm device\n"))
return NULL;
return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm *cxl_nvd;
struct device *dev;
cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
if (!cxl_nvd)
return ERR_PTR(-ENOMEM);
dev = &cxl_nvd->dev;
cxl_nvd->cxlmd = cxlmd;
device_initialize(dev);
device_set_pm_not_required(dev);
dev->parent = &cxlmd->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_nvdimm_type;
return cxl_nvd;
}
/**
* devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
* @host: same host as @cxlmd
* @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
*
* Return: 0 on success, negative error code on failure.
*/
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
{
struct cxl_nvdimm *cxl_nvd;
struct device *dev;
int rc;
cxl_nvd = cxl_nvdimm_alloc(cxlmd);
if (IS_ERR(cxl_nvd))
return PTR_ERR(cxl_nvd);
dev = &cxl_nvd->dev;
rc = dev_set_name(dev, "pmem%d", cxlmd->id);
if (rc)
goto err;
rc = device_add(dev);
if (rc)
goto err;
dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
dev_name(dev));
return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
err:
put_device(dev);
return rc;
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
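For orientation, a minimal sketch of the expected registration order using only the exports above; the function and parameter names are illustrative, not from this commit:

static int example_register_pmem(struct device *host,
				 struct cxl_port *root_port,
				 struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm_bridge *cxl_nvb;

	/* once, at the root, when a pmem-capable CXL window exists */
	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
	if (IS_ERR(cxl_nvb))
		return PTR_ERR(cxl_nvb);

	/* then once per pmem-capable memory expander */
	return devm_cxl_add_nvdimm(host, cxlmd);
}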
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <cxlmem.h>
/**
* DOC: cxl registers
*
* CXL device capabilities are enumerated by PCI DVSEC (Designated
* Vendor-Specific Extended Capability) records and / or descriptors
* provided by platform firmware.
* They can be defined as a set like the device and component registers
* mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and
* Extended Capabilities, or they can be individual capabilities
* appended to bridged and endpoint devices.
*
* Provide common infrastructure for enumerating and mapping these
* discrete capabilities.
*/
/**
* cxl_probe_component_regs() - Detect CXL Component register blocks
* @dev: Host device of the @base mapping
* @base: Mapping containing the HDM Decoder Capability Header
* @map: Map object describing the register block information found
*
* See CXL 2.0 8.2.4 Component Register Layout and Definition
* See CXL 2.0 8.2.5.5 CXL Device Register Interface
*
* Probe for component register information and return it in map object.
*/
void cxl_probe_component_regs(struct device *dev, void __iomem *base,
struct cxl_component_reg_map *map)
{
int cap, cap_count;
u64 cap_array;
*map = (struct cxl_component_reg_map) { 0 };
/*
* CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
* CXL 2.0 8.2.4 Table 141.
*/
base += CXL_CM_OFFSET;
cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
dev_err(dev,
"Couldn't locate the CXL.cache and CXL.mem capability array header./n");
return;
}
/* It's assumed that future versions will be backward compatible */
cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
for (cap = 1; cap <= cap_count; cap++) {
void __iomem *register_block;
u32 hdr;
int decoder_cnt;
u16 cap_id, offset;
u32 length;
hdr = readl(base + cap * 0x4);
cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
register_block = base + offset;
switch (cap_id) {
case CXL_CM_CAP_CAP_ID_HDM:
dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
offset);
hdr = readl(register_block);
decoder_cnt = cxl_hdm_decoder_count(hdr);
length = 0x20 * decoder_cnt + 0x10;
map->hdm_decoder.valid = true;
map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
map->hdm_decoder.size = length;
break;
default:
dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
offset);
break;
}
}
}
EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
/**
* cxl_probe_device_regs() - Detect CXL Device register blocks
* @dev: Host device of the @base mapping
* @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
* @map: Map object describing the register block information found
*
* Probe for device register information and return it in map object.
*/
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
struct cxl_device_reg_map *map)
{
int cap, cap_count;
u64 cap_array;
*map = (struct cxl_device_reg_map){ 0 };
cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
CXLDEV_CAP_ARRAY_CAP_ID)
return;
cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
for (cap = 1; cap <= cap_count; cap++) {
u32 offset, length;
u16 cap_id;
cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
readl(base + cap * 0x10));
offset = readl(base + cap * 0x10 + 0x4);
length = readl(base + cap * 0x10 + 0x8);
switch (cap_id) {
case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
dev_dbg(dev, "found Status capability (0x%x)\n", offset);
map->status.valid = true;
map->status.offset = offset;
map->status.size = length;
break;
case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
map->mbox.valid = true;
map->mbox.offset = offset;
map->mbox.size = length;
break;
case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
break;
case CXLDEV_CAP_CAP_ID_MEMDEV:
dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
map->memdev.valid = true;
map->memdev.offset = offset;
map->memdev.size = length;
break;
default:
if (cap_id >= 0x8000)
dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
else
dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
break;
}
}
}
EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
static void __iomem *devm_cxl_iomap_block(struct device *dev,
resource_size_t addr,
resource_size_t length)
{
void __iomem *ret_val;
struct resource *res;
res = devm_request_mem_region(dev, addr, length, dev_name(dev));
if (!res) {
resource_size_t end = addr + length - 1;
dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
return NULL;
}
ret_val = devm_ioremap(dev, addr, length);
if (!ret_val)
dev_err(dev, "Failed to map region %pr\n", res);
return ret_val;
}
int cxl_map_component_regs(struct pci_dev *pdev,
struct cxl_component_regs *regs,
struct cxl_register_map *map)
{
struct device *dev = &pdev->dev;
resource_size_t phys_addr;
resource_size_t length;
phys_addr = pci_resource_start(pdev, map->barno);
phys_addr += map->block_offset;
phys_addr += map->component_map.hdm_decoder.offset;
length = map->component_map.hdm_decoder.size;
regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
if (!regs->hdm_decoder)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_component_regs);
int cxl_map_device_regs(struct pci_dev *pdev,
struct cxl_device_regs *regs,
struct cxl_register_map *map)
{
struct device *dev = &pdev->dev;
resource_size_t phys_addr;
phys_addr = pci_resource_start(pdev, map->barno);
phys_addr += map->block_offset;
if (map->device_map.status.valid) {
resource_size_t addr;
resource_size_t length;
addr = phys_addr + map->device_map.status.offset;
length = map->device_map.status.size;
regs->status = devm_cxl_iomap_block(dev, addr, length);
if (!regs->status)
return -ENOMEM;
}
if (map->device_map.mbox.valid) {
resource_size_t addr;
resource_size_t length;
addr = phys_addr + map->device_map.mbox.offset;
length = map->device_map.mbox.size;
regs->mbox = devm_cxl_iomap_block(dev, addr, length);
if (!regs->mbox)
return -ENOMEM;
}
if (map->device_map.memdev.valid) {
resource_size_t addr;
resource_size_t length;
addr = phys_addr + map->device_map.memdev.offset;
length = map->device_map.memdev.size;
regs->memdev = devm_cxl_iomap_block(dev, addr, length);
if (!regs->memdev)
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_device_regs);
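A hedged usage sketch of the probe-then-map flow above; the BAR number and block offset are placeholders that would normally come from the PCI register locator DVSEC, and @base is assumed to already map the register block:

static int example_setup_device_regs(struct pci_dev *pdev, void __iomem *base,
				     struct cxl_device_regs *regs)
{
	struct cxl_register_map map = {
		.barno = 0,		/* placeholder */
		.block_offset = 0,	/* placeholder */
	};

	/* enumerate the capabilities behind @base ... */
	cxl_probe_device_regs(&pdev->dev, base, &map.device_map);
	if (!map.device_map.mbox.valid)
		return -ENXIO;

	/* ... then ioremap only the blocks that were found */
	return cxl_map_device_regs(pdev, regs, &map);
}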
@@ -140,7 +140,6 @@ struct cxl_device_reg_map {
 };
 
 struct cxl_register_map {
-	struct list_head list;
 	u64 block_offset;
 	u8 reg_type;
 	u8 barno;
...
@@ -28,11 +28,20 @@
 	(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
 	 CXLMDEV_RESET_NEEDED_NOT)
 
-/*
- * An entire PCI topology full of devices should be enough for any
- * config
- */
-#define CXL_MEM_MAX_DEVS 65536
+/**
+ * struct cdevm_file_operations - devm coordinated cdev file operations
+ * @fops: file operations that are synchronized against @shutdown
+ * @shutdown: disconnect driver data
+ *
+ * @shutdown is invoked in the devres release path to disconnect any
+ * driver instance data from @dev. It assumes synchronization with any
+ * fops operation that requires driver data. After @shutdown an
+ * operation may only reference @device data.
+ */
+struct cdevm_file_operations {
+	struct file_operations fops;
+	void (*shutdown)(struct device *dev);
+};
 
 /**
  * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
@@ -48,6 +57,15 @@ struct cxl_memdev {
 	int id;
 };
 
+static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+	return container_of(dev, struct cxl_memdev, dev);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+		    const struct cdevm_file_operations *cdevm_fops);
+
 /**
  * struct cxl_mem - A CXL memory device
  * @pdev: The PCI device associated with this CXL device.
@@ -77,5 +95,14 @@ struct cxl_mem {
 	struct range pmem_range;
 	struct range ram_range;
+	u64 total_bytes;
+	u64 volatile_only_bytes;
+	u64 persistent_only_bytes;
+	u64 partition_align_bytes;
+	u64 active_volatile_bytes;
+	u64 active_persistent_bytes;
+	u64 next_volatile_bytes;
+	u64 next_persistent_bytes;
 };
 #endif /* __CXL_MEM_H__ */
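The new fields track the capacity picture reported by Identify and "Get Partition Info". For illustration, a sketch (not from this commit) of how the active partition sizes decompose into the ram/pmem DPA ranges above; it assumes volatile capacity starts at DPA 0 and ignores zero-size corner cases:

static inline void example_partition_to_ranges(struct cxl_mem *cxlm)
{
	/* volatile capacity occupies the low DPA range */
	cxlm->ram_range.start = 0;
	cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;

	/* persistent capacity follows immediately after */
	cxlm->pmem_range.start = cxlm->active_volatile_bytes;
	cxlm->pmem_range.end = cxlm->active_volatile_bytes +
			       cxlm->active_persistent_bytes - 1;
}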
...
@@ -25,6 +25,7 @@
 #define CXL_REGLOC_RBI_COMPONENT 1
 #define CXL_REGLOC_RBI_VIRT 2
 #define CXL_REGLOC_RBI_MEMDEV 3
+#define CXL_REGLOC_RBI_TYPES (CXL_REGLOC_RBI_MEMDEV + 1)
 
 #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
...
@@ -6,7 +6,7 @@
 #include <linux/ndctl.h>
 #include <linux/async.h>
 #include <linux/slab.h>
-#include "mem.h"
+#include "cxlmem.h"
 #include "cxl.h"
 
 /*
...
...
@@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
 u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
 struct nd_region;
 struct nd_namespace_pmem;
 struct nd_namespace_blk;
...
@@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
 		if (!nd_label)
 			continue;
 		nd_label_gen_id(&label_id, nd_label->uuid,
-				__le32_to_cpu(nd_label->flags));
+				nsl_get_flags(ndd, nd_label));
 		if (strcmp(old_label_id.id, label_id.id) == 0)
 			set_bit(ND_LABEL_REAP, &label_ent->flags);
 	}
@@ -1847,28 +1847,21 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
 	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
 		struct nd_namespace_label *nd_label = label_ent->label;
 		u16 position, nlabel;
-		u64 isetcookie;
 
 		if (!nd_label)
 			continue;
-		isetcookie = __le64_to_cpu(nd_label->isetcookie);
-		position = __le16_to_cpu(nd_label->position);
-		nlabel = __le16_to_cpu(nd_label->nlabel);
+		position = nsl_get_position(ndd, nd_label);
+		nlabel = nsl_get_nlabel(ndd, nd_label);
 
-		if (isetcookie != cookie)
+		if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
 			continue;
 
 		if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
 			continue;
 
-		if (namespace_label_has(ndd, type_guid)
-				&& !guid_equal(&nd_set->type_guid,
-					&nd_label->type_guid)) {
-			dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
-					&nd_set->type_guid,
-					&nd_label->type_guid);
+		if (!nsl_validate_type_guid(ndd, nd_label,
+					    &nd_set->type_guid))
 			continue;
-		}
 
 		if (found_uuid) {
 			dev_dbg(ndd->dev, "duplicate entry for uuid\n");
@@ -1923,8 +1916,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
 		 */
 		hw_start = nd_mapping->start;
 		hw_end = hw_start + nd_mapping->size;
-		pmem_start = __le64_to_cpu(nd_label->dpa);
-		pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+		pmem_start = nsl_get_dpa(ndd, nd_label);
+		pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
 		if (pmem_start >= hw_start && pmem_start < hw_end
 				&& pmem_end <= hw_end && pmem_end > hw_start)
 			/* pass */;
@@ -1947,14 +1940,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
  * @nd_label: target pmem namespace label to evaluate
  */
 static struct device *create_namespace_pmem(struct nd_region *nd_region,
-		struct nd_namespace_index *nsindex,
+		struct nd_mapping *nd_mapping,
 		struct nd_namespace_label *nd_label)
 {
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+	struct nd_namespace_index *nsindex =
+		to_namespace_index(ndd, ndd->ns_current);
 	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
 	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
 	struct nd_label_ent *label_ent;
 	struct nd_namespace_pmem *nspm;
-	struct nd_mapping *nd_mapping;
 	resource_size_t size = 0;
 	struct resource *res;
 	struct device *dev;
@@ -1966,10 +1961,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 		return ERR_PTR(-ENXIO);
 	}
 
-	if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+	if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
 		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
 				nd_label->uuid);
-		if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
+		if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
 			return ERR_PTR(-EAGAIN);
 
 		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
@@ -2037,20 +2032,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
 			continue;
 		}
 
-		size += __le64_to_cpu(label0->rawsize);
-		if (__le16_to_cpu(label0->position) != 0)
+		ndd = to_ndd(nd_mapping);
+		size += nsl_get_rawsize(ndd, label0);
+		if (nsl_get_position(ndd, label0) != 0)
 			continue;
 		WARN_ON(nspm->alt_name || nspm->uuid);
-		nspm->alt_name = kmemdup((void __force *) label0->name,
-				NSLABEL_NAME_LEN, GFP_KERNEL);
+		nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
+					 NSLABEL_NAME_LEN, GFP_KERNEL);
 		nspm->uuid = kmemdup((void __force *) label0->uuid,
 				NSLABEL_UUID_LEN, GFP_KERNEL);
-		nspm->lbasize = __le64_to_cpu(label0->lbasize);
-		ndd = to_ndd(nd_mapping);
-		if (namespace_label_has(ndd, abstraction_guid))
-			nspm->nsio.common.claim_class
-				= to_nvdimm_cclass(&label0->abstraction_guid);
+		nspm->lbasize = nsl_get_lbasize(ndd, label0);
+		nspm->nsio.common.claim_class =
+			nsl_get_claim_class(ndd, label0);
 	}
 
 	if (!nspm->alt_name || !nspm->uuid) {
@@ -2237,7 +2230,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
 		if (is_namespace_blk(devs[i])) {
 			res = nsblk_add_resource(nd_region, ndd,
 					to_nd_namespace_blk(devs[i]),
-					__le64_to_cpu(nd_label->dpa));
+					nsl_get_dpa(ndd, nd_label));
 			if (!res)
 				return -ENXIO;
 			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
@@ -2265,21 +2258,10 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
 	struct device *dev = NULL;
 	struct resource *res;
 
-	if (namespace_label_has(ndd, type_guid)) {
-		if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
-			dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
-					&nd_set->type_guid,
-					&nd_label->type_guid);
-			return ERR_PTR(-EAGAIN);
-		}
-
-		if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
-			dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
-					nd_set->cookie2,
-					__le64_to_cpu(nd_label->isetcookie));
-			return ERR_PTR(-EAGAIN);
-		}
-	}
+	if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
+		return ERR_PTR(-EAGAIN);
+	if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
+		return ERR_PTR(-EAGAIN);
 
 	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
 	if (!nsblk)
@@ -2288,23 +2270,19 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
 	dev->type = &namespace_blk_device_type;
 	dev->parent = &nd_region->dev;
 	nsblk->id = -1;
-	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
-	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
-			GFP_KERNEL);
-	if (namespace_label_has(ndd, abstraction_guid))
-		nsblk->common.claim_class
-			= to_nvdimm_cclass(&nd_label->abstraction_guid);
+	nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
+	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
+	nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
 	if (!nsblk->uuid)
 		goto blk_err;
-	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+	nsl_get_name(ndd, nd_label, name);
 	if (name[0]) {
-		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
-				GFP_KERNEL);
+		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
 		if (!nsblk->alt_name)
 			goto blk_err;
 	}
 	res = nsblk_add_resource(nd_region, ndd, nsblk,
-			__le64_to_cpu(nd_label->dpa));
+			nsl_get_dpa(ndd, nd_label));
 	if (!res)
 		goto blk_err;
 	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
@@ -2345,6 +2323,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
 	struct device *dev, **devs = NULL;
 	struct nd_label_ent *label_ent, *e;
 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
 
 	/* "safe" because create_namespace_pmem() might list_move() label_ent */
@@ -2355,7 +2334,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
 		if (!nd_label)
 			continue;
-		flags = __le32_to_cpu(nd_label->flags);
+		flags = nsl_get_flags(ndd, nd_label);
 		if (is_nd_blk(&nd_region->dev)
 				== !!(flags & NSLABEL_FLAG_LOCAL))
 			/* pass, region matches label type */;
@@ -2363,8 +2342,8 @@ static struct device **scan_labels(struct nd_region *nd_region)
 			continue;
 
 		/* skip labels that describe extents outside of the region */
-		if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
-		    __le64_to_cpu(nd_label->dpa) > map_end)
+		if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
+		    nsl_get_dpa(ndd, nd_label) > map_end)
 			continue;
 
 		i = add_namespace_resource(nd_region, nd_label, devs, count);
@@ -2381,13 +2360,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
 		if (is_nd_blk(&nd_region->dev))
 			dev = create_namespace_blk(nd_region, nd_label, count);
-		else {
-			struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-			struct nd_namespace_index *nsindex;
-
-			nsindex = to_namespace_index(ndd, ndd->ns_current);
-			dev = create_namespace_pmem(nd_region, nsindex, nd_label);
-		}
+		else
+			dev = create_namespace_pmem(nd_region, nd_mapping,
+						    nd_label);
 
 		if (IS_ERR(dev)) {
 			switch (PTR_ERR(dev)) {
@@ -2571,10 +2546,10 @@ static int init_active_labels(struct nd_region *nd_region)
 				break;
 			label = nd_label_active(ndd, j);
 			if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
-				u32 flags = __le32_to_cpu(label->flags);
+				u32 flags = nsl_get_flags(ndd, label);
 
 				flags &= ~NSLABEL_FLAG_LOCAL;
-				label->flags = __cpu_to_le32(flags);
+				nsl_set_flags(ndd, label, flags);
 			}
 			label_ent->label = label;
...
@@ -35,6 +35,156 @@ struct nvdimm_drvdata {
	struct kref kref;
};
static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return nd_label->name;
}
static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u8 *name)
{
return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
}
static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u8 *name)
{
if (!name)
return NULL;
return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
}
static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le32_to_cpu(nd_label->slot);
}
static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u32 slot)
{
nd_label->slot = __cpu_to_le32(slot);
}
static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le64_to_cpu(nd_label->checksum);
}
static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 checksum)
{
nd_label->checksum = __cpu_to_le64(checksum);
}
static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le32_to_cpu(nd_label->flags);
}
static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u32 flags)
{
nd_label->flags = __cpu_to_le32(flags);
}
static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le64_to_cpu(nd_label->dpa);
}
static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, u64 dpa)
{
nd_label->dpa = __cpu_to_le64(dpa);
}
static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le64_to_cpu(nd_label->rawsize);
}
static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 rawsize)
{
nd_label->rawsize = __cpu_to_le64(rawsize);
}
static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le64_to_cpu(nd_label->isetcookie);
}
static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 isetcookie)
{
nd_label->isetcookie = __cpu_to_le64(isetcookie);
}
static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 cookie)
{
return cookie == __le64_to_cpu(nd_label->isetcookie);
}
static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le16_to_cpu(nd_label->position);
}
static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u16 position)
{
nd_label->position = __cpu_to_le16(position);
}
static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le16_to_cpu(nd_label->nlabel);
}
static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u16 nlabel)
{
nd_label->nlabel = __cpu_to_le16(nlabel);
}
static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
return __le64_to_cpu(nd_label->lbasize);
}
static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 lbasize)
{
nd_label->lbasize = __cpu_to_le64(lbasize);
}
bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 isetcookie);
bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, guid_t *guid);
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label);
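/*
 * Example (illustrative): label field access is funneled through the
 * nsl_* helpers above so that a future CXL label format can be
 * dispatched off of @ndd rather than open-coded __leXX_to_cpu()
 * conversions at every call site:
 *
 *	nsl_set_dpa(ndd, nd_label, res->start);
 *	nsl_set_rawsize(ndd, nd_label, resource_size(res));
 *	if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
 *		return -EINVAL;
 */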
struct nd_region_data {
	int ns_count;
	int ns_active;
...
@@ -50,7 +50,7 @@ enum { CXL_CMDS };
 #define ___C(a, b) { b }
 static const struct {
 	const char *name;
-} cxl_command_names[] = { CXL_CMDS };
+} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
 
 /*
  * Here's how this actually breaks out:
...