Commit 828bf6e9 authored by Linus Torvalds

Merge tag 'libnvdimm-for-4.19_misc' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dave Jiang:
 "Collection of misc libnvdimm patches for 4.19 submission:

   - Add support to read locked nvdimm capacity.

   - Change test code to make DSM failure code injection an override.

   - Add support for calculating the maximum contiguous area for a
     namespace.

   - Add support for queueing a short ARS when there is an ongoing ARS
     for an nvdimm.

   - Allow NULL to be passed in to ->direct_access() for kaddr and pfn
     params.

   - Improve smart injection support for nvdimm emulation testing.

   - Fix test code that supports emulating controller temperature.

   - Fix hang on error before devm_memremap_pages()

   - Fix a bug that causes user memory corruption when ars_status data
     is returned to the user.

   - Maintainer updates for Ross Zwisler's email address and adding Jan
     Kara to fsdax"

* tag 'libnvdimm-for-4.19_misc' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm: fix ars_status output length calculation
  device-dax: avoid hang on error before devm_memremap_pages()
  tools/testing/nvdimm: improve emulation of smart injection
  filesystem-dax: Do not request kaddr and pfn when not required
  md/dm-writecache: Don't request pointer dummy_addr when not required
  dax/super: Do not request a pointer kaddr when not required
  tools/testing/nvdimm: kaddr and pfn can be NULL to ->direct_access()
  s390, dcssblk: kaddr and pfn can be NULL to ->direct_access()
  libnvdimm, pmem: kaddr and pfn can be NULL to ->direct_access()
  acpi/nfit: queue issuing of ars when an uc error notification comes in
  libnvdimm: Export max available extent
  libnvdimm: Use max contiguous area for namespace size
  MAINTAINERS: Add Jan Kara for filesystem DAX
  MAINTAINERS: update Ross Zwisler's email address
  tools/testing/nvdimm: Fix support for emulating controller temperature
  tools/testing/nvdimm: Make DSM failure code injection an override
  acpi, nfit: Prefer _DSM over _LSR for namespace label reads
  libnvdimm: Introduce locked DIMM capacity support
parents b3262720 286e8771
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
 Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
 Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
+Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
......
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4364,7 +4364,8 @@ F:	drivers/i2c/busses/i2c-diolan-u2c.c
 
 FILESYSTEM DIRECT ACCESS (DAX)
 M:	Matthew Wilcox <mawilcox@microsoft.com>
-M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Ross Zwisler <zwisler@kernel.org>
+M:	Jan Kara <jack@suse.cz>
 L:	linux-fsdevel@vger.kernel.org
 S:	Supported
 F:	fs/dax.c
@@ -4374,7 +4375,7 @@ F:	include/trace/events/fs_dax.h
 DEVICE DIRECT ACCESS (DAX)
 M:	Dan Williams <dan.j.williams@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
-M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Ross Zwisler <zwisler@kernel.org>
 M:	Vishal Verma <vishal.l.verma@intel.com>
 L:	linux-nvdimm@lists.01.org
 S:	Supported
@@ -8303,7 +8304,7 @@ S:	Maintained
 F:	tools/lib/lockdep/
 
 LIBNVDIMM BLK: MMIO-APERTURE DRIVER
-M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Ross Zwisler <zwisler@kernel.org>
 M:	Dan Williams <dan.j.williams@intel.com>
 M:	Vishal Verma <vishal.l.verma@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
@@ -8316,7 +8317,7 @@ F:	drivers/nvdimm/region_devs.c
 LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
 M:	Vishal Verma <vishal.l.verma@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
-M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Ross Zwisler <zwisler@kernel.org>
 M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
@@ -8324,7 +8325,7 @@ S:	Supported
 F:	drivers/nvdimm/btt*
 
 LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
-M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Ross Zwisler <zwisler@kernel.org>
 M:	Dan Williams <dan.j.williams@intel.com>
 M:	Vishal Verma <vishal.l.verma@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
@@ -8343,7 +8344,7 @@ F:	Documentation/devicetree/bindings/pmem/pmem-region.txt
 
 LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
 M:	Dan Williams <dan.j.williams@intel.com>
-M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Ross Zwisler <zwisler@kernel.org>
 M:	Vishal Verma <vishal.l.verma@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
......
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1699,7 +1699,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 {
 	struct acpi_device *adev, *adev_dimm;
 	struct device *dev = acpi_desc->dev;
-	unsigned long dsm_mask;
+	unsigned long dsm_mask, label_mask;
 	const guid_t *guid;
 	int i;
 	int family = -1;
@@ -1771,6 +1771,16 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 				1ULL << i))
 			set_bit(i, &nfit_mem->dsm_mask);
 
+	/*
+	 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
+	 * due to their better semantics handling locked capacity.
+	 */
+	label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
+		| 1 << ND_CMD_SET_CONFIG_DATA;
+	if (family == NVDIMM_FAMILY_INTEL
+			&& (dsm_mask & label_mask) == label_mask)
+		return 0;
+
 	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
 			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
 		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
@@ -2559,7 +2569,12 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
 			test_bit(ARS_SHORT, &nfit_spa->ars_state)
 			? "short" : "long");
 	clear_bit(ARS_SHORT, &nfit_spa->ars_state);
-	set_bit(ARS_DONE, &nfit_spa->ars_state);
+	if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
+		set_bit(ARS_SHORT, &nfit_spa->ars_state);
+		set_bit(ARS_REQ, &nfit_spa->ars_state);
+		dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
+	} else
+		set_bit(ARS_DONE, &nfit_spa->ars_state);
 }
 
 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -3256,9 +3271,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
 		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
 			continue;
 
-		if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state))
+		if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
 			busy++;
-		else {
+			set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
+		} else {
 			if (test_bit(ARS_SHORT, &flags))
 				set_bit(ARS_SHORT, &nfit_spa->ars_state);
 			scheduled++;
......
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -119,6 +119,7 @@ enum nfit_dimm_notifiers {
 enum nfit_ars_state {
 	ARS_REQ,
+	ARS_REQ_REDO,
 	ARS_DONE,
 	ARS_SHORT,
 	ARS_FAILED,
......
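A note on the two nfit hunks above: together they implement a small request-coalescing protocol. A scrub request that arrives while ARS_REQ is already in flight is recorded as ARS_REQ_REDO rather than dropped, and ars_complete() converts a pending redo into a fresh short ARS. A minimal runnable sketch of the same protocol, using C11 atomics in place of the kernel's bitops (the simulation and its helper names are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>

enum { ARS_REQ, ARS_REQ_REDO, ARS_DONE, ARS_SHORT };

static atomic_ulong state;

static int test_and_set(int bit)	/* ~ test_and_set_bit() */
{
	return !!(atomic_fetch_or(&state, 1UL << bit) & (1UL << bit));
}

static int test_and_clear(int bit)	/* ~ test_and_clear_bit() */
{
	return !!(atomic_fetch_and(&state, ~(1UL << bit)) & (1UL << bit));
}

static void request_scrub(void)
{
	if (test_and_set(ARS_REQ))
		test_and_set(ARS_REQ_REDO);	/* ARS running: remember it */
}

static void complete_scrub(void)
{
	if (test_and_clear(ARS_REQ_REDO)) {
		test_and_set(ARS_SHORT);	/* requeue as a short ARS */
		test_and_set(ARS_REQ);
	} else {
		test_and_clear(ARS_REQ);
		test_and_set(ARS_DONE);
	}
}

int main(void)
{
	request_scrub();	/* starts an ARS */
	request_scrub();	/* arrives mid-scrub: coalesced into REDO */
	complete_scrub();	/* first pass done: requeues one short ARS */
	printf("ARS_REQ pending again: %lu\n",
			(atomic_load(&state) >> ARS_REQ) & 1UL);
	return 0;
}

Without the REDO bit, the second request would have been lost once the in-flight scrub completed; with it, exactly one follow-up short ARS runs.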
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
-							&dax_pmem->ref);
-	if (rc)
+	rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+	if (rc) {
+		percpu_ref_exit(&dax_pmem->ref);
 		return rc;
+	}
 
 	dax_pmem->pgmap.ref = &dax_pmem->ref;
 	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
-	if (IS_ERR(addr))
+	if (IS_ERR(addr)) {
+		devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+		percpu_ref_exit(&dax_pmem->ref);
 		return PTR_ERR(addr);
+	}
 
 	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
 							&dax_pmem->ref);
......
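Context for the hunk above: dax_pmem_percpu_exit() blocks in wait_for_completion() until the percpu_ref's release callback fires, and that release only runs once dax_pmem_percpu_kill() has killed the ref. A sketch of the pre-patch hang, as read from the surrounding driver (annotation, not text from the patch):

/*
 * pre-patch, when devm_memremap_pages() failed:
 *
 *   devm unwind -> dax_pmem_percpu_exit()
 *                    -> wait_for_completion()   <-- never returns:
 *                       dax_pmem_percpu_kill() was never registered,
 *                       so the ref's ->release() never fires
 *
 * post-patch, the error paths call percpu_ref_exit() directly and
 * drop the pending devm action, so teardown cannot block.
 */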
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -89,7 +89,6 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 	struct request_queue *q;
 	pgoff_t pgoff;
 	int err, id;
-	void *kaddr;
 	pfn_t pfn;
 	long len;
 	char buf[BDEVNAME_SIZE];
@@ -122,7 +121,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 	}
 
 	id = dax_read_lock();
-	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+	len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
 	dax_read_unlock(id);
 
 	put_dax(dax_dev);
......
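This caller-side cleanup relies on the relaxed ->direct_access() contract introduced elsewhere in the series: either output pointer may now be NULL when the caller does not need it. A minimal sketch of such a caller (hypothetical helper, not from the patch):

/* Validate that pgoff maps, without asking the driver for a kernel
 * address that would never be used. */
static long dax_mappable_pages(struct dax_device *dax_dev, pgoff_t pgoff)
{
	pfn_t pfn;

	return dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
}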
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -268,9 +268,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 		i = 0;
 		do {
 			long daa;
-			void *dummy_addr;
 			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
-					&dummy_addr, &pfn);
+					NULL, &pfn);
 			if (daa <= 0) {
 				r = daa ? daa : -EINVAL;
 				goto err3;
......
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -812,9 +812,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
 		 * overshoots the remainder by 4 bytes, assume it was
 		 * including 'status'.
 		 */
-		if (out_field[1] - 8 == remainder)
+		if (out_field[1] - 4 == remainder)
 			return remainder;
-		return out_field[1] - 4;
+		return out_field[1] - 8;
 	} else if (cmd == ND_CMD_CALL) {
 		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
......
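The fix above transposes two constants that had been swapped: when the firmware-reported output length (out_field[1]) overshoots the kernel's computed remainder by exactly 4 bytes, the firmware evidently counted the 4-byte 'status' word, and remainder itself is the correct payload size. A worked sketch of the corrected branch (hypothetical helper; the numbers are illustrative):

static u32 ars_status_out_size(u32 out_len /* out_field[1] */, u32 remainder)
{
	if (out_len - 4 == remainder)	/* e.g. 260 - 4 == 256 */
		return remainder;	/* firmware counted 'status' */
	return out_len - 8;		/* otherwise, per the fix above */
}

With the two return paths crossed, a correct ars_status payload length could be over-reported by 4 bytes, which is the user memory corruption called out in the merge summary.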
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -34,6 +34,9 @@ static int nvdimm_probe(struct device *dev)
 		return rc;
 	}
 
+	/* reset locked, to be validated below... */
+	nvdimm_clear_locked(dev);
+
 	ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
 	if (!ndd)
 		return -ENOMEM;
@@ -48,12 +51,30 @@ static int nvdimm_probe(struct device *dev)
 	get_device(dev);
 	kref_init(&ndd->kref);
 
+	/*
+	 * EACCES failures reading the namespace label-area-properties
+	 * are interpreted as the DIMM capacity being locked but the
+	 * namespace labels themselves being accessible.
+	 */
 	rc = nvdimm_init_nsarea(ndd);
-	if (rc == -EACCES)
+	if (rc == -EACCES) {
+		/*
+		 * See nvdimm_namespace_common_probe() where we fail to
+		 * allow namespaces to probe while the DIMM is locked,
+		 * but we do allow for namespace enumeration.
+		 */
 		nvdimm_set_locked(dev);
+		rc = 0;
+	}
+
 	if (rc)
 		goto err;
 
+	/*
+	 * EACCES failures reading the namespace label-data are
+	 * interpreted as the label area being locked in addition to the
+	 * DIMM capacity. We fail the dimm probe to prevent regions from
+	 * attempting to parse the label area.
+	 */
 	rc = nvdimm_init_config_data(ndd);
 	if (rc == -EACCES)
 		nvdimm_set_locked(dev);
@@ -72,7 +93,6 @@ static int nvdimm_probe(struct device *dev)
 		if (rc == 0)
 			nvdimm_set_aliasing(dev);
 	}
-	nvdimm_clear_locked(dev);
 	nvdimm_bus_unlock(dev);
 
 	if (rc)
......
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -536,6 +536,37 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 	return info.available;
 }
 
+/**
+ * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
+ *			contiguous unallocated dpa range.
+ * @nd_region: constrain available space check to this reference region
+ * @nd_mapping: container of dpa-resource-root + labels
+ */
+resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
+		struct nd_mapping *nd_mapping)
+{
+	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+	struct nvdimm_bus *nvdimm_bus;
+	resource_size_t max = 0;
+	struct resource *res;
+
+	/* if a dimm is disabled the available capacity is zero */
+	if (!ndd)
+		return 0;
+
+	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
+		return 0;
+	for_each_dpa_resource(ndd, res) {
+		if (strcmp(res->name, "pmem-reserve") != 0)
+			continue;
+		if (resource_size(res) > max)
+			max = resource_size(res);
+	}
+	release_free_pmem(nvdimm_bus, nd_mapping);
+	return max;
+}
+
 /**
  * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
  * @nd_mapping: container of dpa-resource-root + labels
......
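How the new helper works: __reserve_free_pmem() temporarily claims every free pmem gap in the DIMM's DPA tree as a "pmem-reserve" resource, the loop keeps the largest such gap, and release_free_pmem() undoes the claims. An illustrative DPA layout (annotation only):

/*
 * dpa:  0        2G           5G     6G        8G
 *       |--ns0---|--reserve---|-ns1--|-reserve-|
 *
 * the free gaps are claimed as "pmem-reserve" (3G and 2G here), and
 * nd_pmem_max_contiguous_dpa() returns 3G: the largest single
 * allocation this DIMM can still back without fragmentation.
 */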
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -799,7 +799,7 @@ static int merge_dpa(struct nd_region *nd_region,
 	return 0;
 }
 
-static int __reserve_free_pmem(struct device *dev, void *data)
+int __reserve_free_pmem(struct device *dev, void *data)
 {
 	struct nvdimm *nvdimm = data;
 	struct nd_region *nd_region;
@@ -836,7 +836,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
 	return 0;
 }
 
-static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
+void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
 		struct nd_mapping *nd_mapping)
 {
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1032,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 		allocated += nvdimm_allocated_dpa(ndd, &label_id);
 	}
 
-	available = nd_region_available_dpa(nd_region);
+	available = nd_region_allocatable_dpa(nd_region);
 	if (val > available + allocated)
 		return -ENOSPC;
 
@@ -1144,6 +1144,26 @@ resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nvdimm_namespace_capacity);
 
+bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
+{
+	int i;
+	bool locked = false;
+	struct device *dev = &ndns->dev;
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
+			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
+			locked = true;
+		}
+	}
+	return locked;
+}
+EXPORT_SYMBOL(nvdimm_namespace_locked);
+
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -1695,6 +1715,9 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
 		}
 	}
 
+	if (nvdimm_namespace_locked(ndns))
+		return ERR_PTR(-EACCES);
+
 	size = nvdimm_namespace_capacity(ndns);
 	if (size < ND_MIN_NAMESPACE_SIZE) {
 		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
......
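With the check above, every consumer of nvdimm_namespace_common_probe() (the btt, pfn, dax and pmem probe paths) sees -EACCES while any backing DIMM is locked, so namespaces stay enumerable but cannot be enabled. A sketch of the typical caller pattern (illustrative, not from the patch):

static int example_claim_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);	/* -EACCES until the DIMM unlocks */

	/* ... claim the namespace and set up the frontend ... */
	return 0;
}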
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -100,6 +100,14 @@ struct nd_region;
 struct nvdimm_drvdata;
 struct nd_mapping;
 void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
+
+int __reserve_free_pmem(struct device *dev, void *data);
+void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
+		struct nd_mapping *nd_mapping);
+
+resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
+		struct nd_mapping *nd_mapping);
+resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
......
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -357,6 +357,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
 		struct nd_label_id *label_id, resource_size_t start,
 		resource_size_t n);
 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
+bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
......
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -226,8 +226,11 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
 					PFN_PHYS(nr_pages))))
 		return -EIO;
-	*kaddr = pmem->virt_addr + offset;
-	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+
+	if (kaddr)
+		*kaddr = pmem->virt_addr + offset;
+	if (pfn)
+		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
 	/*
 	 * If badblocks are present, limit known good range to the
......
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -389,6 +389,30 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
 	return available;
 }
 
+resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
+{
+	resource_size_t available = 0;
+	int i;
+
+	if (is_memory(&nd_region->dev))
+		available = PHYS_ADDR_MAX;
+
+	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+
+		if (is_memory(&nd_region->dev))
+			available = min(available,
+					nd_pmem_max_contiguous_dpa(nd_region,
+							nd_mapping));
+		else if (is_nd_blk(&nd_region->dev))
+			available += nd_blk_available_dpa(nd_region);
+	}
+	if (is_memory(&nd_region->dev))
+		return available * nd_region->ndr_mappings;
+	return available;
+}
+
 static ssize_t available_size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -410,6 +434,21 @@ static ssize_t available_size_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(available_size);
 
+static ssize_t max_available_extent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	unsigned long long available = 0;
+
+	nvdimm_bus_lock(dev);
+	wait_nvdimm_bus_probe_idle(dev);
+	available = nd_region_allocatable_dpa(nd_region);
+	nvdimm_bus_unlock(dev);
+
+	return sprintf(buf, "%llu\n", available);
+}
+static DEVICE_ATTR_RO(max_available_extent);
+
 static ssize_t init_namespaces_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -561,6 +600,7 @@ static struct attribute *nd_region_attributes[] = {
 	&dev_attr_read_only.attr,
 	&dev_attr_set_cookie.attr,
 	&dev_attr_available_size.attr,
+	&dev_attr_max_available_extent.attr,
 	&dev_attr_namespace_seed.attr,
 	&dev_attr_init_namespaces.attr,
 	&dev_attr_badblocks.attr,
......
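The new max_available_extent region attribute lets userspace (ndctl, for instance) size the largest single namespace that can still be created. A minimal sketch of a reader, assuming a region device named region0:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/nd/devices/region0/max_available_extent";
	unsigned long long extent;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &extent) == 1)
		printf("largest creatable namespace: %llu bytes\n", extent);
	fclose(f);
	return 0;
}

The attribute reports contiguous allocatable capacity in bytes (scaled by the interleave width in nd_region_allocatable_dpa() above), which can be much smaller than available_size on a fragmented region.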
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -922,9 +922,11 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 	unsigned long dev_sz;
 
 	dev_sz = dev_info->end - dev_info->start + 1;
-	*kaddr = (void *) dev_info->start + offset;
-	*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
-			PFN_DEV|PFN_SPECIAL);
+	if (kaddr)
+		*kaddr = (void *) dev_info->start + offset;
+	if (pfn)
+		*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
+				PFN_DEV|PFN_SPECIAL);
 
 	return (dev_sz - offset) / PAGE_SIZE;
 }
......
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -655,7 +655,6 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 {
 	void *vto, *kaddr;
 	pgoff_t pgoff;
-	pfn_t pfn;
 	long rc;
 	int id;
 
@@ -664,7 +663,7 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 		return rc;
 
 	id = dax_read_lock();
-	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
 	if (rc < 0) {
 		dax_read_unlock(id);
 		return rc;
@@ -975,7 +974,6 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 {
 	const sector_t sector = dax_iomap_sector(iomap, pos);
 	pgoff_t pgoff;
-	void *kaddr;
 	int id, rc;
 	long length;
 
@@ -984,7 +982,7 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 		return rc;
 	id = dax_read_lock();
 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
-				   &kaddr, pfnp);
+				   NULL, pfnp);
 	if (length < 0) {
 		rc = length;
 		goto out;
@@ -1060,15 +1058,13 @@ int __dax_zero_page_range(struct block_device *bdev,
 		pgoff_t pgoff;
 		long rc, id;
 		void *kaddr;
-		pfn_t pfn;
 
 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 		if (rc)
 			return rc;
 
 		id = dax_read_lock();
-		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
-				&pfn);
+		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
 		if (rc < 0) {
 			dax_read_unlock(id);
 			return rc;
@@ -1124,7 +1120,6 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		ssize_t map_len;
 		pgoff_t pgoff;
 		void *kaddr;
-		pfn_t pfn;
 
 		if (fatal_signal_pending(current)) {
 			ret = -EINTR;
@@ -1136,7 +1131,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 			break;
 
 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
-				&kaddr, &pfn);
+				&kaddr, NULL);
 		if (map_len < 0) {
 			ret = map_len;
 			break;
......
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	if (get_nfit_res(pmem->phys_addr + offset)) {
 		struct page *page;
 
-		*kaddr = pmem->virt_addr + offset;
+		if (kaddr)
+			*kaddr = pmem->virt_addr + offset;
 		page = vmalloc_to_page(pmem->virt_addr + offset);
-		*pfn = page_to_pfn_t(page);
+		if (pfn)
+			*pfn = page_to_pfn_t(page);
 		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
 				__func__, pmem, pgoff, page_to_pfn(page));
 
 		return 1;
 	}
 
-	*kaddr = pmem->virt_addr + offset;
-	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+	if (kaddr)
+		*kaddr = pmem->virt_addr + offset;
+	if (pfn)
+		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
 	/*
 	 * If badblocks are present, limit known good range to the
......
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -142,6 +142,28 @@ static u32 handle[] = {
 static unsigned long dimm_fail_cmd_flags[NUM_DCR];
 static int dimm_fail_cmd_code[NUM_DCR];
 
+static const struct nd_intel_smart smart_def = {
+	.flags = ND_INTEL_SMART_HEALTH_VALID
+		| ND_INTEL_SMART_SPARES_VALID
+		| ND_INTEL_SMART_ALARM_VALID
+		| ND_INTEL_SMART_USED_VALID
+		| ND_INTEL_SMART_SHUTDOWN_VALID
+		| ND_INTEL_SMART_MTEMP_VALID
+		| ND_INTEL_SMART_CTEMP_VALID,
+	.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
+	.media_temperature = 23 * 16,
+	.ctrl_temperature = 25 * 16,
+	.pmic_temperature = 40 * 16,
+	.spares = 75,
+	.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
+		| ND_INTEL_SMART_TEMP_TRIP,
+	.ait_status = 1,
+	.life_used = 5,
+	.shutdown_state = 0,
+	.vendor_size = 0,
+	.shutdown_count = 100,
+};
+
 struct nfit_test_fw {
 	enum intel_fw_update_state state;
 	u32 context;
@@ -752,15 +774,30 @@ static int nfit_test_cmd_smart_inject(
 	if (buf_len != sizeof(*inj))
 		return -EINVAL;
 
-	if (inj->mtemp_enable)
-		smart->media_temperature = inj->media_temperature;
-	if (inj->spare_enable)
-		smart->spares = inj->spares;
-	if (inj->fatal_enable)
-		smart->health = ND_INTEL_SMART_FATAL_HEALTH;
-	if (inj->unsafe_shutdown_enable) {
-		smart->shutdown_state = 1;
-		smart->shutdown_count++;
+	if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
+		if (inj->mtemp_enable)
+			smart->media_temperature = inj->media_temperature;
+		else
+			smart->media_temperature = smart_def.media_temperature;
+	}
+	if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
+		if (inj->spare_enable)
+			smart->spares = inj->spares;
+		else
+			smart->spares = smart_def.spares;
+	}
+	if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
+		if (inj->fatal_enable)
+			smart->health = ND_INTEL_SMART_FATAL_HEALTH;
+		else
+			smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
+	}
+	if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
+		if (inj->unsafe_shutdown_enable) {
+			smart->shutdown_state = 1;
+			smart->shutdown_count++;
+		} else
+			smart->shutdown_state = 0;
 	}
 	inj->status = 0;
 	smart_notify(bus_dev, dimm_dev, smart, thresh);
@@ -884,6 +921,16 @@ static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
 	return 0;
 }
 
+static int override_return_code(int dimm, unsigned int func, int rc)
+{
+	if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
+		if (dimm_fail_cmd_code[dimm])
+			return dimm_fail_cmd_code[dimm];
+		return -EIO;
+	}
+	return rc;
+}
+
 static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
 {
 	int i;
@@ -894,13 +941,6 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
 			break;
 	if (i >= ARRAY_SIZE(handle))
 		return -ENXIO;
-
-	if ((1 << func) & dimm_fail_cmd_flags[i]) {
-		if (dimm_fail_cmd_code[i])
-			return dimm_fail_cmd_code[i];
-		return -EIO;
-	}
-
 	return i;
 }
 
@@ -939,48 +979,59 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
 			switch (func) {
 			case ND_INTEL_ENABLE_LSS_STATUS:
-				return nd_intel_test_cmd_set_lss_status(t,
+				rc = nd_intel_test_cmd_set_lss_status(t,
 						buf, buf_len);
+				break;
 			case ND_INTEL_FW_GET_INFO:
-				return nd_intel_test_get_fw_info(t, buf,
+				rc = nd_intel_test_get_fw_info(t, buf,
 						buf_len, i - t->dcr_idx);
+				break;
 			case ND_INTEL_FW_START_UPDATE:
-				return nd_intel_test_start_update(t, buf,
+				rc = nd_intel_test_start_update(t, buf,
 						buf_len, i - t->dcr_idx);
+				break;
 			case ND_INTEL_FW_SEND_DATA:
-				return nd_intel_test_send_data(t, buf,
+				rc = nd_intel_test_send_data(t, buf,
 						buf_len, i - t->dcr_idx);
+				break;
 			case ND_INTEL_FW_FINISH_UPDATE:
-				return nd_intel_test_finish_fw(t, buf,
+				rc = nd_intel_test_finish_fw(t, buf,
 						buf_len, i - t->dcr_idx);
+				break;
 			case ND_INTEL_FW_FINISH_QUERY:
-				return nd_intel_test_finish_query(t, buf,
+				rc = nd_intel_test_finish_query(t, buf,
 						buf_len, i - t->dcr_idx);
+				break;
 			case ND_INTEL_SMART:
-				return nfit_test_cmd_smart(buf, buf_len,
+				rc = nfit_test_cmd_smart(buf, buf_len,
 						&t->smart[i - t->dcr_idx]);
+				break;
 			case ND_INTEL_SMART_THRESHOLD:
-				return nfit_test_cmd_smart_threshold(buf,
+				rc = nfit_test_cmd_smart_threshold(buf,
 						buf_len,
 						&t->smart_threshold[i -
							t->dcr_idx]);
+				break;
 			case ND_INTEL_SMART_SET_THRESHOLD:
-				return nfit_test_cmd_smart_set_threshold(buf,
+				rc = nfit_test_cmd_smart_set_threshold(buf,
 						buf_len,
 						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
+				break;
 			case ND_INTEL_SMART_INJECT:
-				return nfit_test_cmd_smart_inject(buf,
+				rc = nfit_test_cmd_smart_inject(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
+				break;
 			default:
 				return -ENOTTY;
 			}
+			return override_return_code(i, func, rc);
 		}
 
 		if (!test_bit(cmd, &cmd_mask)
@@ -1006,6 +1057,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
 		default:
 			return -ENOTTY;
 		}
+		return override_return_code(i, func, rc);
 	} else {
 		struct ars_state *ars_state = &t->ars_state;
 		struct nd_cmd_pkg *call_pkg = buf;
@@ -1302,29 +1354,9 @@ static void smart_init(struct nfit_test *t)
 		.ctrl_temperature = 30 * 16,
 		.spares = 5,
 	};
-	const struct nd_intel_smart smart_data = {
-		.flags = ND_INTEL_SMART_HEALTH_VALID
-			| ND_INTEL_SMART_SPARES_VALID
-			| ND_INTEL_SMART_ALARM_VALID
-			| ND_INTEL_SMART_USED_VALID
-			| ND_INTEL_SMART_SHUTDOWN_VALID
-			| ND_INTEL_SMART_MTEMP_VALID,
-		.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
-		.media_temperature = 23 * 16,
-		.ctrl_temperature = 25 * 16,
-		.pmic_temperature = 40 * 16,
-		.spares = 75,
-		.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
-			| ND_INTEL_SMART_TEMP_TRIP,
-		.ait_status = 1,
-		.life_used = 5,
-		.shutdown_state = 0,
-		.vendor_size = 0,
-		.shutdown_count = 100,
-	};
 
 	for (i = 0; i < t->num_dcr; i++) {
-		memcpy(&t->smart[i], &smart_data, sizeof(smart_data));
+		memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
 		memcpy(&t->smart_threshold[i], &smart_t_data,
 				sizeof(smart_t_data));
 	}
......
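The net behavioral change in the test harness: a DSM failure injection no longer short-circuits in get_dimm() before the emulated command runs. The handler now executes (so emulated state still advances) and override_return_code() then replaces its result. A standalone sketch of the same idea (simulation only, not kernel code):

#include <stdio.h>

#define EIO 5

static unsigned long fail_flags;	/* per-DIMM failure mask, by func */
static int fail_code;			/* optional specific error code */

static int override_return_code(unsigned int func, int rc)
{
	if ((1UL << func) & fail_flags)
		return fail_code ? fail_code : -EIO;
	return rc;
}

static int emulate_smart(void)
{
	puts("smart handler ran; emulated state updated");
	return 0;
}

int main(void)
{
	fail_flags = 1UL << 10;	/* inject a failure for func 10 */
	printf("rc = %d\n", override_return_code(10, emulate_smart()));
	return 0;
}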