Commit 80d10a6c authored by Ben Widawsky, committed by Dan Williams

cxl/region: Add interleave geometry attributes

Add ABI to allow the number of devices that comprise a region to be
set as well as the interleave granularity for the region.
Signed-off-by: Ben Widawsky <bwidawsk@kernel.org>
[djbw: reword changelog]
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/20220624041950.559155-11-dan.j.williams@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent dd5ba0eb
@@ -303,3 +303,24 @@ Description:
(RW) Write a unique identifier for the region. This field must
be set for persistent regions and it must not conflict with the
UUID of another region.
What: /sys/bus/cxl/devices/regionZ/interleave_granularity
Date: May, 2022
KernelVersion: v5.20
Contact: linux-cxl@vger.kernel.org
Description:
(RW) Set the number of consecutive bytes each device in the
interleave set will claim. The possible interleave granularity
values are determined by the CXL spec and the participating
devices.
What: /sys/bus/cxl/devices/regionZ/interleave_ways
Date: May, 2022
KernelVersion: v5.20
Contact: linux-cxl@vger.kernel.org
Description:
(RW) Configures the number of devices participating in the
region. Each device will provide 1/interleave_ways of storage
for the region.
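For orientation only, a userspace sketch (not part of this patch) that writes the two new attributes might look like the following; the region0 name, the values 256 and 2, and the write_attr() helper are assumptions for illustration, and acceptable values depend on the root decoder and the participating devices:

/*
 * Illustrative sketch only, not part of this patch. Paths and values
 * are assumptions; both attributes must be written before the region
 * size is set (CXL_CONFIG_INTERLEAVE_ACTIVE).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* hypothetical helper: write a string value to one sysfs attribute */
static int write_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        if (write_attr("/sys/bus/cxl/devices/region0/interleave_granularity", "256"))
                perror("interleave_granularity");
        if (write_attr("/sys/bus/cxl/devices/region0/interleave_ways", "2"))
                perror("interleave_ways");
        return 0;
}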
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -21,6 +22,8 @@
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 */
/*
@@ -122,8 +125,135 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
        return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
        up_read(&cxl_region_rwsem);

        return rc;
}

static ssize_t interleave_ways_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t len)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        int rc, val;
        u8 iw;

        rc = kstrtoint(buf, 0, &val);
        if (rc)
                return rc;

        rc = ways_to_cxl(val, &iw);
        if (rc)
                return rc;

        /*
         * Even for x3, x9, and x12 interleaves the region interleave must be a
         * power of 2 multiple of the host bridge interleave.
         */
        if (!is_power_of_2(val / cxld->interleave_ways) ||
            (val % cxld->interleave_ways)) {
                dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
                return -EINVAL;
        }

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        p->interleave_ways = val;
out:
        up_write(&cxl_region_rwsem);
        if (rc)
                return rc;
        return len;
}
static DEVICE_ATTR_RW(interleave_ways);
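As a worked example of the check above (a standalone userspace sketch, not kernel code and not part of this patch), region_ways_valid() mirrors the power-of-2-multiple rule from interleave_ways_store(); the helper name and sample values are assumptions:

/*
 * Sketch only: reproduces the validation logic so the rule can be
 * tried against sample values outside the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(int v)
{
        return v > 0 && (v & (v - 1)) == 0;
}

/* invalid when val is not a power-of-2 multiple of the host bridge ways */
static bool region_ways_valid(int val, int hb_ways)
{
        return (val % hb_ways) == 0 && is_pow2(val / hb_ways);
}

int main(void)
{
        /* x2 host bridge: x2/x4/x8 regions pass, x6 does not */
        printf("%d %d %d %d\n", region_ways_valid(2, 2), region_ways_valid(4, 2),
               region_ways_valid(8, 2), region_ways_valid(6, 2));
        /* x3 host bridge: x3/x6/x12 regions pass, x9 does not */
        printf("%d %d %d %d\n", region_ways_valid(3, 3), region_ways_valid(6, 3),
               region_ways_valid(12, 3), region_ways_valid(9, 3));
        return 0;
}

With a x2 host bridge decoder, x2/x4/x8 region values pass and x6 is rejected; with a x3 host bridge, x3/x6/x12 pass and x9 is rejected.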
static ssize_t interleave_granularity_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        ssize_t rc;

        rc = down_read_interruptible(&cxl_region_rwsem);
        if (rc)
                return rc;
        rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
        up_read(&cxl_region_rwsem);

        return rc;
}

static ssize_t interleave_granularity_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t len)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct cxl_region *cxlr = to_cxl_region(dev);
        struct cxl_region_params *p = &cxlr->params;
        int rc, val;
        u16 ig;

        rc = kstrtoint(buf, 0, &val);
        if (rc)
                return rc;

        rc = granularity_to_cxl(val, &ig);
        if (rc)
                return rc;

        /*
         * Disallow region granularity less than root granularity to
         * simplify the implementation. Otherwise, regions with a
         * granularity less than the root interleave result in needing
         * multiple endpoints to support a single slot in the
         * interleave.
         */
        if (val < cxld->interleave_granularity)
                return -EINVAL;

        rc = down_write_killable(&cxl_region_rwsem);
        if (rc)
                return rc;
        if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
                rc = -EBUSY;
                goto out;
        }

        p->interleave_granularity = val;
out:
        up_write(&cxl_region_rwsem);
        if (rc)
                return rc;
        return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
static struct attribute *cxl_region_attrs[] = {
        &dev_attr_uuid.attr,
        &dev_attr_interleave_ways.attr,
        &dev_attr_interleave_granularity.attr,
        NULL,
};
@@ -216,6 +346,8 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
                                              enum cxl_decoder_type type)
{
        struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
        struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
        struct cxl_region_params *p;
        struct cxl_region *cxlr;
        struct device *dev;
        int rc;
@@ -223,8 +355,10 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
        cxlr = cxl_region_alloc(cxlrd, id);
        if (IS_ERR(cxlr))
                return cxlr;
        p = &cxlr->params;
        cxlr->mode = mode;
        cxlr->type = type;
        p->interleave_granularity = cxld->interleave_granularity;

        dev = &cxlr->dev;
        rc = dev_set_name(dev, "region%d", id);
@@ -7,6 +7,7 @@
#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/io.h>
/**
@@ -92,6 +93,31 @@ static inline int cxl_to_ways(u8 eniw, unsigned int *val)
        return 0;
}

static inline int granularity_to_cxl(int g, u16 *ig)
{
        if (g > SZ_16K || g < 256 || !is_power_of_2(g))
                return -EINVAL;
        *ig = ilog2(g) - 8;
        return 0;
}

static inline int ways_to_cxl(int ways, u8 *iw)
{
        if (ways > 16)
                return -EINVAL;
        if (is_power_of_2(ways)) {
                *iw = ilog2(ways);
                return 0;
        }
        if (ways % 3)
                return -EINVAL;
        ways /= 3;
        if (!is_power_of_2(ways))
                return -EINVAL;
        *iw = ilog2(ways) + 8;
        return 0;
}
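To make the encodings concrete, the sketch below (userspace, not part of this patch) mirrors the two helpers above and prints the encodings they produce; granularity_to_eig(), ways_to_eiw(), and the sample inputs are names and values invented for the example:

/* Sketch only: userspace mirror of granularity_to_cxl() and ways_to_cxl(). */
#include <stdbool.h>
#include <stdio.h>

static bool pow2(int v) { return v > 0 && (v & (v - 1)) == 0; }
static int ilog2_int(int v) { int i = -1; while (v) { v >>= 1; i++; } return i; }

/* mirrors granularity_to_cxl(): 256 -> 0, 512 -> 1, ..., 16384 -> 6 */
static int granularity_to_eig(int g)
{
        if (g > 16384 || g < 256 || !pow2(g))
                return -1;
        return ilog2_int(g) - 8;
}

/* mirrors ways_to_cxl(): 1,2,4,8,16 -> 0..4 and 3,6,12 -> 8,9,10 */
static int ways_to_eiw(int ways)
{
        if (ways > 16)
                return -1;
        if (pow2(ways))
                return ilog2_int(ways);
        if (ways % 3 || !pow2(ways / 3))
                return -1;
        return ilog2_int(ways / 3) + 8;
}

int main(void)
{
        const int gs[] = { 256, 1024, 16384, 384 };
        const int ws[] = { 1, 2, 3, 6, 12, 16, 9 };

        for (unsigned int i = 0; i < sizeof(gs) / sizeof(gs[0]); i++)
                printf("granularity %5d -> eig %d\n", gs[i], granularity_to_eig(gs[i]));
        for (unsigned int i = 0; i < sizeof(ws) / sizeof(ws[0]); i++)
                printf("ways %2d -> eiw %d\n", ws[i], ways_to_eiw(ws[i]));
        return 0;
}

For these inputs it reports 256 -> 0, 1024 -> 2, 16384 -> 6 with 384 rejected, and 1, 2, 16 -> 0, 1, 4 with 3, 6, 12 -> 8, 9, 10 and 9-way rejected, matching the checks in granularity_to_cxl() and ways_to_cxl().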
/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define CXLDEV_CAP_ARRAY_CAP_ID 0
@@ -298,11 +324,14 @@ struct cxl_root_decoder {
/*
 * enum cxl_config_state - State machine for region configuration
 * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
 * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
 * changes to interleave_ways or interleave_granularity
 * @CXL_CONFIG_ACTIVE: All targets have been added, the region is now
 * active
 */
enum cxl_config_state {
        CXL_CONFIG_IDLE,
        CXL_CONFIG_INTERLEAVE_ACTIVE,
        CXL_CONFIG_ACTIVE,
};
@@ -310,12 +339,16 @@ enum cxl_config_state {
 * struct cxl_region_params - region settings
 * @state: allow the driver to lockdown further parameter changes
 * @uuid: unique id for persistent regions
 * @interleave_ways: number of endpoints in the region
 * @interleave_granularity: capacity each endpoint contributes to a stripe
 *
 * State transitions are protected by the cxl_region_rwsem
 */
struct cxl_region_params {
        enum cxl_config_state state;
        uuid_t uuid;
        int interleave_ways;
        int interleave_granularity;
};
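As a simplified illustration of how these two parameters relate (an assumption-laden model, not part of this patch: it ignores decoder position and the details of hardware HPA decode), each stripe spans interleave_ways * interleave_granularity bytes and consecutive granularity-sized chunks rotate across the endpoints:

/* Simplified model for illustration only; values are assumptions. */
#include <stdio.h>

int main(void)
{
        int ways = 4, granularity = 256;        /* example values only */
        unsigned long offset = 0x1234;          /* offset into the region */
        unsigned long chunk = offset / granularity;

        printf("stripe size: %d bytes\n", ways * granularity);
        printf("offset 0x%lx -> endpoint %lu, byte %lu within its chunk\n",
               offset, chunk % ways, offset % granularity);
        return 0;
}

For ways=4 and granularity=256 this reports a 1024-byte stripe and maps region offset 0x1234 to endpoint 2.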
/**