Commit b6ffe9ba authored by Linus Torvalds

Merge tag 'libnvdimm-for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "libnvdimm updates for the latest ACPI and UEFI specifications. This
  pull request also includes new 'struct dax_operations' enabling to
  undo the abuse of copy_user_nocache() for copy operations to pmem.

   The dax work originally missed 4.12 so that concerns raised by Al
   could be addressed.

  Summary:

   - Introduce the _flushcache() family of memory copy helpers and use
     them for persistent memory write operations on x86. The
     _flushcache() semantic indicates that the cache is either bypassed
     for the copy operation (movnt) or any lines dirtied by the copy
     operation are written back (clwb, clflushopt, or clflush); a usage
     sketch follows this message.

   - Extend dax_operations with ->copy_from_iter() and ->flush()
     operations. These operations and other infrastructure updates allow
     all persistent-memory-specific dax functionality to be pushed into
     libnvdimm and the pmem driver directly. They also allow dax-specific
     sysfs attributes to be linked to a host device, for example:
     /sys/block/pmem0/dax/write_cache

   - Add support for the new NVDIMM platform/firmware mechanisms
     introduced in ACPI 6.2 and UEFI 2.7. This support includes the v1.2
     namespace label format, extensions to the address-range-scrub
     command set, new error injection commands, and a new BTT
     (block-translation-table) layout. These updates support inter-OS
     and pre-OS compatibility.

   - Fix a longstanding memory corruption bug in nfit_test.

   - Make the pmem and nvdimm-region 'badblocks' sysfs files poll(2)
     capable.

   - Miscellaneous fixes and small updates across libnvdimm and the nfit
     driver.

  Acknowledgements that came after the branch was pushed: commit
  6aa734a2 ("libnvdimm, region, pmem: fix 'badblocks'
  sysfs_get_dirent() reference lifetime") was reviewed by Toshi Kani
  <toshi.kani@hpe.com>"
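
As a concrete (if simplified) illustration of the _flushcache() semantic
described in the summary, a pmem-style write path could use the new helpers
roughly as follows. The function and its context are hypothetical; only
memcpy_flushcache() and nvdimm_flush() come from the kernel API:

#include <linux/string.h>	/* memcpy_flushcache() */
#include <linux/libnvdimm.h>	/* struct nd_region, nvdimm_flush() */

/*
 * Hypothetical helper: copy a buffer into a pmem mapping with the new
 * _flushcache semantic, then drain the region's posted-write queues so
 * the data is durable on the media.
 */
static void example_pmem_write(struct nd_region *nd_region, void *pmem_addr,
		const void *buf, size_t len)
{
	/* either bypasses the cache (movnt) or writes back dirtied lines */
	memcpy_flushcache(pmem_addr, buf, len);
	/* flush any platform posted-write buffers for this region */
	nvdimm_flush(nd_region);
}
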

* tag 'libnvdimm-for-4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (42 commits)
  libnvdimm, namespace: record 'lbasize' for pmem namespaces
  acpi/nfit: Issue Start ARS to retrieve existing records
  libnvdimm: New ACPI 6.2 DSM functions
  acpi, nfit: Show bus_dsm_mask in sysfs
  libnvdimm, acpi, nfit: Add bus level dsm mask for pass thru.
  acpi, nfit: Enable DSM pass thru for root functions.
  libnvdimm: passthru functions clear to send
  libnvdimm, btt: convert some info messages to warn/err
  libnvdimm, region, pmem: fix 'badblocks' sysfs_get_dirent() reference lifetime
  libnvdimm: fix the clear-error check in nsio_rw_bytes
  libnvdimm, btt: fix btt_rw_page not returning errors
  acpi, nfit: quiet invalid block-aperture-region warnings
  libnvdimm, btt: BTT updates for UEFI 2.7 format
  acpi, nfit: constify *_attribute_group
  libnvdimm, pmem: disable dax flushing when pmem is fronting a volatile region
  libnvdimm, pmem, dax: export a cache control attribute
  dax: convert to bitmask for flags
  dax: remove default copy_from_iter fallback
  libnvdimm, nfit: enable support for volatile ranges
  libnvdimm, pmem: fix persistence warning
  ...
parents 9f45efb9 9d92573f
...@@ -7680,9 +7680,7 @@ M: Ross Zwisler <ross.zwisler@linux.intel.com> ...@@ -7680,9 +7680,7 @@ M: Ross Zwisler <ross.zwisler@linux.intel.com>
L: linux-nvdimm@lists.01.org L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported S: Supported
F: drivers/nvdimm/pmem.c F: drivers/nvdimm/pmem*
F: include/linux/pmem.h
F: arch/*/include/asm/pmem.h
LIGHTNVM PLATFORM SUPPORT LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <mb@lightnvm.io> M: Matias Bjorling <mb@lightnvm.io>
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/uio.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/prom.h> #include <asm/prom.h>
...@@ -163,8 +164,15 @@ axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pa ...@@ -163,8 +164,15 @@ axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pa
return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn); return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn);
} }
static size_t axon_ram_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter(addr, bytes, i);
}
static const struct dax_operations axon_ram_dax_ops = { static const struct dax_operations axon_ram_dax_ops = {
.direct_access = axon_ram_dax_direct_access, .direct_access = axon_ram_dax_direct_access,
.copy_from_iter = axon_ram_copy_from_iter,
}; };
/** /**
......
...@@ -54,6 +54,7 @@ config X86 ...@@ -54,6 +54,7 @@ config X86
select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_PMEM_API if X86_64 select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_KERNEL_RWX
......
/*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
* arch_memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
* @src: source buffer for the copy
* @n: length of the copy in bytes
*
* Copy data to persistent memory media via non-temporal stores so that
* a subsequent pmem driver flush operation will drain posted write queues.
*/
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
int rem;
/*
* We are copying between two kernel buffers, if
* __copy_from_user_inatomic_nocache() returns an error (page
* fault) we would have already reported a general protection fault
* before the WARN+BUG.
*/
rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
__func__, dst, src, rem))
BUG();
}
/**
* arch_wb_cache_pmem - write back a cache range with CLWB
* @vaddr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
* instruction. Note that @size is internally rounded up to be cache
* line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
unsigned long clflush_mask = x86_clflush_size - 1;
void *vend = addr + size;
void *p;
for (p = (void *)((unsigned long)addr & ~clflush_mask);
p < vend; p += x86_clflush_size)
clwb(p);
}
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
*/
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
size_t len;
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
/*
* In the iovec case on x86_64 copy_from_iter_nocache() uses
* non-temporal stores for the bulk of the transfer, but we need
* to manually flush if the transfer is unaligned. A cached
* memory copy is used when destination or size is not naturally
* aligned. That is:
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*
* In the non-iovec case the entire destination needs to be
* flushed.
*/
if (iter_is_iovec(i)) {
unsigned long flushed, dest = (unsigned long) addr;
if (bytes < 8) {
if (!IS_ALIGNED(dest, 4) || (bytes != 4))
arch_wb_cache_pmem(addr, bytes);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
arch_wb_cache_pmem(addr, 1);
}
flushed = dest - (unsigned long) addr;
if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
arch_wb_cache_pmem(addr + bytes - 1, 1);
}
} else
arch_wb_cache_pmem(addr, bytes);
return len;
}
/**
* arch_clear_pmem - zero a PMEM memory range
* @addr: virtual start address
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
*/
static inline void arch_clear_pmem(void *addr, size_t size)
{
memset(addr, 0, size);
arch_wb_cache_pmem(addr, size);
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
clflush_cache_range(addr, size);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */
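
To make the "rounded up to cache line alignment" behaviour of
arch_wb_cache_pmem() concrete, the small userspace stand-in below
(illustrative only; a 64-byte x86_clflush_size is assumed) prints the
line-aligned addresses the loop above would hand to clwb:

#include <stdio.h>
#include <stdint.h>

static void show_clwb_targets(uintptr_t addr, size_t size)
{
	const uintptr_t clflush_size = 64;	/* boot_cpu_data.x86_clflush_size */
	const uintptr_t clflush_mask = clflush_size - 1;
	uintptr_t vend = addr + size;
	uintptr_t p;

	for (p = addr & ~clflush_mask; p < vend; p += clflush_size)
		printf("clwb 0x%lx\n", (unsigned long)p);
}

int main(void)
{
	show_clwb_targets(0x1005, 0x10);	/* one line: 0x1000 */
	show_clwb_targets(0x1038, 0x10);	/* straddles a boundary: 0x1000 and 0x1040 */
	return 0;
}
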
...@@ -109,6 +109,11 @@ memcpy_mcsafe(void *dst, const void *src, size_t cnt) ...@@ -109,6 +109,11 @@ memcpy_mcsafe(void *dst, const void *src, size_t cnt)
return 0; return 0;
} }
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_64_H */ #endif /* _ASM_X86_STRING_64_H */
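
Architectures that do not select ARCH_HAS_UACCESS_FLUSHCACHE never define
__HAVE_ARCH_MEMCPY_FLUSHCACHE, and callers are still expected to be able to
use memcpy_flushcache() unconditionally via a generic, plain-memcpy fallback
roughly like the sketch below (assumed rather than shown in this diff):

#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	/* no cache-bypass guarantee on these architectures */
	memcpy(dst, src, cnt);
}
#endif
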
...@@ -171,6 +171,10 @@ unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigne ...@@ -171,6 +171,10 @@ unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigne
extern long __copy_user_nocache(void *dst, const void __user *src, extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest); unsigned size, int zerorest);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len);
static inline int static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src, __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size) unsigned size)
...@@ -179,6 +183,13 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src, ...@@ -179,6 +183,13 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
return __copy_user_nocache(dst, src, size, 0); return __copy_user_nocache(dst, src, size, 0);
} }
static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, src, size);
}
unsigned long unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len); copy_user_handle_tail(char *to, char *from, unsigned len);
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
*/ */
#include <linux/export.h> #include <linux/export.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/highmem.h>
/* /*
* Zero Userspace * Zero Userspace
...@@ -73,3 +74,136 @@ copy_user_handle_tail(char *to, char *from, unsigned len) ...@@ -73,3 +74,136 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
clac(); clac();
return len; return len;
} }
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* clean_cache_range - write back a cache range with CLWB
* @vaddr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
* instruction. Note that @size is internally rounded up to be cache
* line size aligned.
*/
static void clean_cache_range(void *addr, size_t size)
{
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
unsigned long clflush_mask = x86_clflush_size - 1;
void *vend = addr + size;
void *p;
for (p = (void *)((unsigned long)addr & ~clflush_mask);
p < vend; p += x86_clflush_size)
clwb(p);
}
void arch_wb_cache_pmem(void *addr, size_t size)
{
clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
unsigned long flushed, dest = (unsigned long) dst;
long rc = __copy_user_nocache(dst, src, size, 0);
/*
* __copy_user_nocache() uses non-temporal stores for the bulk
* of the transfer, but we need to manually flush if the
* transfer is unaligned. A cached memory copy is used when
* destination or size is not naturally aligned. That is:
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*/
if (size < 8) {
if (!IS_ALIGNED(dest, 4) || size != 4)
clean_cache_range(dst, 1);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
clean_cache_range(dst, 1);
}
flushed = dest - (unsigned long) dst;
if (size > flushed && !IS_ALIGNED(size - flushed, 8))
clean_cache_range(dst + size - 1, 1);
}
return rc;
}
void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
unsigned long dest = (unsigned long) _dst;
unsigned long source = (unsigned long) _src;
/* cache copy and flush to align dest */
if (!IS_ALIGNED(dest, 8)) {
unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
memcpy((void *) dest, (void *) source, len);
clean_cache_range((void *) dest, len);
dest += len;
source += len;
size -= len;
if (!size)
return;
}
/* 4x8 movnti loop */
while (size >= 32) {
asm("movq (%0), %%r8\n"
"movq 8(%0), %%r9\n"
"movq 16(%0), %%r10\n"
"movq 24(%0), %%r11\n"
"movnti %%r8, (%1)\n"
"movnti %%r9, 8(%1)\n"
"movnti %%r10, 16(%1)\n"
"movnti %%r11, 24(%1)\n"
:: "r" (source), "r" (dest)
: "memory", "r8", "r9", "r10", "r11");
dest += 32;
source += 32;
size -= 32;
}
/* 1x8 movnti loop */
while (size >= 8) {
asm("movq (%0), %%r8\n"
"movnti %%r8, (%1)\n"
:: "r" (source), "r" (dest)
: "memory", "r8");
dest += 8;
source += 8;
size -= 8;
}
/* 1x4 movnti loop */
while (size >= 4) {
asm("movl (%0), %%r8d\n"
"movnti %%r8d, (%1)\n"
:: "r" (source), "r" (dest)
: "memory", "r8");
dest += 4;
source += 4;
size -= 4;
}
/* cache copy for remaining bytes */
if (size) {
memcpy((void *) dest, (void *) source, size);
clean_cache_range((void *) dest, size);
}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len)
{
char *from = kmap_atomic(page);
memcpy_flushcache(to, from + offset, len);
kunmap_atomic(from);
}
#endif
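
The head/tail rules in __copy_user_flushcache() above are subtle; the
userspace mirror below (illustrative only, 64-byte cache lines assumed)
reproduces just the decision logic and prints which extra write-backs the
kernel routine would issue after the non-temporal bulk copy:

#include <stdio.h>
#include <stdint.h>

static void show_flush_decisions(uintptr_t dst, size_t size)
{
	const uintptr_t clflush_size = 64;
	uintptr_t dest = dst, flushed;

	if (size < 8) {
		if ((dest & 3) || size != 4)
			printf("write back head line of 0x%lx\n", (unsigned long)dst);
	} else {
		if (dest & 7) {
			/* ALIGN(dest, x86_clflush_size) */
			dest = (dest + clflush_size - 1) & ~(clflush_size - 1);
			printf("write back head line of 0x%lx\n", (unsigned long)dst);
		}
		flushed = dest - dst;
		if (size > flushed && ((size - flushed) & 7))
			printf("write back tail line of 0x%lx\n",
					(unsigned long)(dst + size - 1));
	}
}

int main(void)
{
	show_flush_decisions(0x2004, 101);	/* unaligned head, ragged tail: two write-backs */
	show_flush_decisions(0x2000, 64);	/* aligned start, multiple of 8: none */
	return 0;
}
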
...@@ -150,6 +150,12 @@ void clflush_cache_range(void *vaddr, unsigned int size) ...@@ -150,6 +150,12 @@ void clflush_cache_range(void *vaddr, unsigned int size)
} }
EXPORT_SYMBOL_GPL(clflush_cache_range); EXPORT_SYMBOL_GPL(clflush_cache_range);
void arch_invalidate_pmem(void *addr, size_t size)
{
clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
static void __cpa_flush_all(void *arg) static void __cpa_flush_all(void *arg)
{ {
unsigned long cache = (unsigned long)arg; unsigned long cache = (unsigned long)arg;
......
This diff is collapsed.
...@@ -79,7 +79,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, ...@@ -79,7 +79,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
* already in progress, just let that be the last * already in progress, just let that be the last
* authoritative one * authoritative one
*/ */
acpi_nfit_ars_rescan(acpi_desc); acpi_nfit_ars_rescan(acpi_desc, 0);
} }
break; break;
} }
......
...@@ -79,6 +79,7 @@ enum { ...@@ -79,6 +79,7 @@ enum {
enum nfit_root_notifiers { enum nfit_root_notifiers {
NFIT_NOTIFY_UPDATE = 0x80, NFIT_NOTIFY_UPDATE = 0x80,
NFIT_NOTIFY_UC_MEMORY_ERROR = 0x81,
}; };
enum nfit_dimm_notifiers { enum nfit_dimm_notifiers {
...@@ -154,6 +155,7 @@ struct acpi_nfit_desc { ...@@ -154,6 +155,7 @@ struct acpi_nfit_desc {
struct list_head idts; struct list_head idts;
struct nvdimm_bus *nvdimm_bus; struct nvdimm_bus *nvdimm_bus;
struct device *dev; struct device *dev;
u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status; struct nd_cmd_ars_status *ars_status;
size_t ars_status_size; size_t ars_status_size;
struct work_struct work; struct work_struct work;
...@@ -206,7 +208,7 @@ struct nfit_blk { ...@@ -206,7 +208,7 @@ struct nfit_blk {
extern struct list_head acpi_descs; extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock; extern struct mutex acpi_desc_lock;
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc); int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags);
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
void nfit_mce_register(void); void nfit_mce_register(void);
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#ifdef CONFIG_BLK_DEV_RAM_DAX #ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/dax.h> #include <linux/dax.h>
#include <linux/uio.h>
#endif #endif
#include <linux/uaccess.h> #include <linux/uaccess.h>
...@@ -354,8 +355,15 @@ static long brd_dax_direct_access(struct dax_device *dax_dev, ...@@ -354,8 +355,15 @@ static long brd_dax_direct_access(struct dax_device *dax_dev,
return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn); return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
} }
static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter(addr, bytes, i);
}
static const struct dax_operations brd_dax_ops = { static const struct dax_operations brd_dax_ops = {
.direct_access = brd_dax_direct_access, .direct_access = brd_dax_direct_access,
.copy_from_iter = brd_dax_copy_from_iter,
}; };
#endif #endif
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/cdev.h> #include <linux/cdev.h>
#include <linux/hash.h> #include <linux/hash.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h> #include <linux/dax.h>
#include <linux/fs.h> #include <linux/fs.h>
...@@ -115,13 +116,20 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize) ...@@ -115,13 +116,20 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
EXPORT_SYMBOL_GPL(__bdev_dax_supported); EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif #endif
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
DAXDEV_ALIVE,
/* gate whether dax_flush() calls the low level flush routine */
DAXDEV_WRITE_CACHE,
};
/** /**
* struct dax_device - anchor object for dax services * struct dax_device - anchor object for dax services
* @inode: core vfs * @inode: core vfs
* @cdev: optional character interface for "device dax" * @cdev: optional character interface for "device dax"
* @host: optional name for lookups where the device path is not available * @host: optional name for lookups where the device path is not available
* @private: dax driver private data * @private: dax driver private data
* @alive: !alive + rcu grace period == no new operations / mappings * @flags: state and boolean properties
*/ */
struct dax_device { struct dax_device {
struct hlist_node list; struct hlist_node list;
...@@ -129,10 +137,75 @@ struct dax_device { ...@@ -129,10 +137,75 @@ struct dax_device {
struct cdev cdev; struct cdev cdev;
const char *host; const char *host;
void *private; void *private;
bool alive; unsigned long flags;
const struct dax_operations *ops; const struct dax_operations *ops;
}; };
static ssize_t write_cache_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
ssize_t rc;
WARN_ON_ONCE(!dax_dev);
if (!dax_dev)
return -ENXIO;
rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
&dax_dev->flags));
put_dax(dax_dev);
return rc;
}
static ssize_t write_cache_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
bool write_cache;
int rc = strtobool(buf, &write_cache);
struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
WARN_ON_ONCE(!dax_dev);
if (!dax_dev)
return -ENXIO;
if (rc)
len = rc;
else if (write_cache)
set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
else
clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
put_dax(dax_dev);
return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
WARN_ON_ONCE(!dax_dev);
if (!dax_dev)
return 0;
if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
return 0;
return a->mode;
}
static struct attribute *dax_attributes[] = {
&dev_attr_write_cache.attr,
NULL,
};
struct attribute_group dax_attribute_group = {
.name = "dax",
.attrs = dax_attributes,
.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
/** /**
* dax_direct_access() - translate a device pgoff to an absolute pfn * dax_direct_access() - translate a device pgoff to an absolute pfn
* @dax_dev: a dax_device instance representing the logical memory range * @dax_dev: a dax_device instance representing the logical memory range
...@@ -172,10 +245,43 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, ...@@ -172,10 +245,43 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
} }
EXPORT_SYMBOL_GPL(dax_direct_access); EXPORT_SYMBOL_GPL(dax_direct_access);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
if (!dax_alive(dax_dev))
return 0;
return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size)
{
if (!dax_alive(dax_dev))
return;
if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
return;
if (dax_dev->ops->flush)
dax_dev->ops->flush(dax_dev, pgoff, addr, size);
}
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
if (wc)
set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
else
clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);
bool dax_alive(struct dax_device *dax_dev) bool dax_alive(struct dax_device *dax_dev)
{ {
lockdep_assert_held(&dax_srcu); lockdep_assert_held(&dax_srcu);
return dax_dev->alive; return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
} }
EXPORT_SYMBOL_GPL(dax_alive); EXPORT_SYMBOL_GPL(dax_alive);
...@@ -195,7 +301,7 @@ void kill_dax(struct dax_device *dax_dev) ...@@ -195,7 +301,7 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev) if (!dax_dev)
return; return;
dax_dev->alive = false; clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu); synchronize_srcu(&dax_srcu);
...@@ -239,7 +345,7 @@ static void dax_destroy_inode(struct inode *inode) ...@@ -239,7 +345,7 @@ static void dax_destroy_inode(struct inode *inode)
{ {
struct dax_device *dax_dev = to_dax_dev(inode); struct dax_device *dax_dev = to_dax_dev(inode);
WARN_ONCE(dax_dev->alive, WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
"kill_dax() must be called before final iput()\n"); "kill_dax() must be called before final iput()\n");
call_rcu(&inode->i_rcu, dax_i_callback); call_rcu(&inode->i_rcu, dax_i_callback);
} }
...@@ -291,7 +397,7 @@ static struct dax_device *dax_dev_get(dev_t devt) ...@@ -291,7 +397,7 @@ static struct dax_device *dax_dev_get(dev_t devt)
dax_dev = to_dax_dev(inode); dax_dev = to_dax_dev(inode);
if (inode->i_state & I_NEW) { if (inode->i_state & I_NEW) {
dax_dev->alive = true; set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev; inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR; inode->i_mode = S_IFCHR;
inode->i_flags = S_DAX; inode->i_flags = S_DAX;
......
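
Taken together, the super.c changes above mean a driver publishes its pmem
copy/flush behaviour through dax_operations and opts into cache management
with dax_write_cache(). A minimal sketch of such a driver follows; all foo_*
names are hypothetical, and the pattern simply mirrors what the pmem driver
does later in this series:

#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>	/* arch_wb_cache_pmem(), assumed location */

static long foo_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	return -ENXIO;	/* a real driver resolves pgoff to its mapping here */
}

static size_t foo_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* cache-bypassing / write-back-flushing copy into device memory */
	return copy_from_iter_flushcache(addr, bytes, i);
}

static void foo_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t size)
{
	arch_wb_cache_pmem(addr, size);
}

static const struct dax_operations foo_dax_ops = {
	.direct_access = foo_dax_direct_access,
	.copy_from_iter = foo_dax_copy_from_iter,
	.flush = foo_dax_flush,
};

/*
 * In the probe path the driver would then call
 *	dax_dev = alloc_dax(foo, disk->disk_name, &foo_dax_ops);
 *	dax_write_cache(dax_dev, true);
 * so that dax_flush() actually invokes ->flush(). The resulting attribute
 * is visible to userspace as /sys/block/<disk>/dax/write_cache and accepts
 * the usual strtobool() values.
 */
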
...@@ -170,6 +170,34 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, ...@@ -170,6 +170,34 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
} }
static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
struct dax_device *dax_dev = lc->dev->dax_dev;
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
dev_sector = linear_map_sector(ti, sector);
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
size_t size)
{
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
struct dax_device *dax_dev = lc->dev->dax_dev;
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
dev_sector = linear_map_sector(ti, sector);
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
return;
dax_flush(dax_dev, pgoff, addr, size);
}
static struct target_type linear_target = { static struct target_type linear_target = {
.name = "linear", .name = "linear",
.version = {1, 4, 0}, .version = {1, 4, 0},
...@@ -183,6 +211,8 @@ static struct target_type linear_target = { ...@@ -183,6 +211,8 @@ static struct target_type linear_target = {
.prepare_ioctl = linear_prepare_ioctl, .prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices, .iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access, .direct_access = linear_dax_direct_access,
.dax_copy_from_iter = linear_dax_copy_from_iter,
.dax_flush = linear_dax_flush,
}; };
int __init dm_linear_init(void) int __init dm_linear_init(void)
......
...@@ -332,6 +332,44 @@ static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, ...@@ -332,6 +332,44 @@ static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
} }
static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
size_t size)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
return;
dax_flush(dax_dev, pgoff, addr, size);
}
/* /*
* Stripe status: * Stripe status:
* *
...@@ -452,6 +490,8 @@ static struct target_type stripe_target = { ...@@ -452,6 +490,8 @@ static struct target_type stripe_target = {
.iterate_devices = stripe_iterate_devices, .iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints, .io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access, .direct_access = stripe_dax_direct_access,
.dax_copy_from_iter = stripe_dax_copy_from_iter,
.dax_flush = stripe_dax_flush,
}; };
int __init dm_stripe_init(void) int __init dm_stripe_init(void)
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/dax.h> #include <linux/dax.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h> #include <linux/hdreg.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/wait.h> #include <linux/wait.h>
...@@ -972,6 +973,48 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, ...@@ -972,6 +973,48 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return ret; return ret;
} }
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
long ret = 0;
int srcu_idx;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti)
goto out;
if (!ti->type->dax_copy_from_iter) {
ret = copy_from_iter(addr, bytes, i);
goto out;
}
ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
int srcu_idx;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti)
goto out;
if (ti->type->dax_flush)
ti->type->dax_flush(ti, pgoff, addr, size);
out:
dm_put_live_table(md, srcu_idx);
}
/* /*
* A target may call dm_accept_partial_bio only from the map routine. It is * A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH. * allowed for all bio types except REQ_PREFLUSH.
...@@ -2958,6 +3001,8 @@ static const struct block_device_operations dm_blk_dops = { ...@@ -2958,6 +3001,8 @@ static const struct block_device_operations dm_blk_dops = {
static const struct dax_operations dm_dax_ops = { static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access, .direct_access = dm_dax_direct_access,
.copy_from_iter = dm_dax_copy_from_iter,
.flush = dm_dax_flush,
}; };
/* /*
......
...@@ -37,8 +37,8 @@ static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, ...@@ -37,8 +37,8 @@ static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
struct nd_btt *nd_btt = arena->nd_btt; struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns; struct nd_namespace_common *ndns = nd_btt->ndns;
/* arena offsets are 4K from the base of the device */ /* arena offsets may be shifted from the base of the device */
offset += SZ_4K; offset += arena->nd_btt->initial_offset;
return nvdimm_read_bytes(ndns, offset, buf, n, flags); return nvdimm_read_bytes(ndns, offset, buf, n, flags);
} }
...@@ -48,8 +48,8 @@ static int arena_write_bytes(struct arena_info *arena, resource_size_t offset, ...@@ -48,8 +48,8 @@ static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
struct nd_btt *nd_btt = arena->nd_btt; struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns; struct nd_namespace_common *ndns = nd_btt->ndns;
/* arena offsets are 4K from the base of the device */ /* arena offsets may be shifted from the base of the device */
offset += SZ_4K; offset += arena->nd_btt->initial_offset;
return nvdimm_write_bytes(ndns, offset, buf, n, flags); return nvdimm_write_bytes(ndns, offset, buf, n, flags);
} }
...@@ -323,7 +323,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, ...@@ -323,7 +323,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
old_ent = btt_log_get_old(log); old_ent = btt_log_get_old(log);
if (old_ent < 0 || old_ent > 1) { if (old_ent < 0 || old_ent > 1) {
dev_info(to_dev(arena), dev_err(to_dev(arena),
"log corruption (%d): lane %d seq [%d, %d]\n", "log corruption (%d): lane %d seq [%d, %d]\n",
old_ent, lane, log[0].seq, log[1].seq); old_ent, lane, log[0].seq, log[1].seq);
/* TODO set error state? */ /* TODO set error state? */
...@@ -576,8 +576,8 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, ...@@ -576,8 +576,8 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
arena->internal_lbasize = roundup(arena->external_lbasize, arena->internal_lbasize = roundup(arena->external_lbasize,
INT_LBASIZE_ALIGNMENT); INT_LBASIZE_ALIGNMENT);
arena->nfree = BTT_DEFAULT_NFREE; arena->nfree = BTT_DEFAULT_NFREE;
arena->version_major = 1; arena->version_major = btt->nd_btt->version_major;
arena->version_minor = 1; arena->version_minor = btt->nd_btt->version_minor;
if (available % BTT_PG_SIZE) if (available % BTT_PG_SIZE)
available -= (available % BTT_PG_SIZE); available -= (available % BTT_PG_SIZE);
...@@ -684,7 +684,7 @@ static int discover_arenas(struct btt *btt) ...@@ -684,7 +684,7 @@ static int discover_arenas(struct btt *btt)
dev_info(to_dev(arena), "No existing arenas\n"); dev_info(to_dev(arena), "No existing arenas\n");
goto out; goto out;
} else { } else {
dev_info(to_dev(arena), dev_err(to_dev(arena),
"Found corrupted metadata!\n"); "Found corrupted metadata!\n");
ret = -ENODEV; ret = -ENODEV;
goto out; goto out;
...@@ -1227,7 +1227,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) ...@@ -1227,7 +1227,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
op_is_write(bio_op(bio)), iter.bi_sector); op_is_write(bio_op(bio)), iter.bi_sector);
if (err) { if (err) {
dev_info(&btt->nd_btt->dev, dev_err(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n", "io error in %s sector %lld, len %d,\n",
(op_is_write(bio_op(bio))) ? "WRITE" : (op_is_write(bio_op(bio))) ? "WRITE" :
"READ", "READ",
...@@ -1248,10 +1248,13 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, ...@@ -1248,10 +1248,13 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write) struct page *page, bool is_write)
{ {
struct btt *btt = bdev->bd_disk->private_data; struct btt *btt = bdev->bd_disk->private_data;
int rc;
btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector); rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
if (rc == 0)
page_endio(page, is_write, 0); page_endio(page, is_write, 0);
return 0;
return rc;
} }
...@@ -1369,7 +1372,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize, ...@@ -1369,7 +1372,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
} }
if (btt->init_state != INIT_READY && nd_region->ro) { if (btt->init_state != INIT_READY && nd_region->ro) {
dev_info(dev, "%s is read-only, unable to init btt metadata\n", dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
dev_name(&nd_region->dev)); dev_name(&nd_region->dev));
return NULL; return NULL;
} else if (btt->init_state != INIT_READY) { } else if (btt->init_state != INIT_READY) {
...@@ -1424,6 +1427,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns) ...@@ -1424,6 +1427,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{ {
struct nd_btt *nd_btt = to_nd_btt(ndns->claim); struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
struct nd_region *nd_region; struct nd_region *nd_region;
struct btt_sb *btt_sb;
struct btt *btt; struct btt *btt;
size_t rawsize; size_t rawsize;
...@@ -1432,10 +1436,21 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns) ...@@ -1432,10 +1436,21 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
return -ENODEV; return -ENODEV;
} }
rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K; btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
/*
* If this returns < 0, that is ok as it just means there wasn't
* an existing BTT, and we're creating a new one. We still need to
* call this as we need the version dependent fields in nd_btt to be
* set correctly based on the holder class
*/
nd_btt_version(nd_btt, ndns, btt_sb);
rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) { if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n", dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K); dev_name(&ndns->dev),
ARENA_MIN_SIZE + nd_btt->initial_offset);
return -ENXIO; return -ENXIO;
} }
nd_region = to_nd_region(nd_btt->dev.parent); nd_region = to_nd_region(nd_btt->dev.parent);
......
...@@ -184,5 +184,7 @@ struct btt { ...@@ -184,5 +184,7 @@ struct btt {
}; };
bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super); bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super);
int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
struct btt_sb *btt_sb);
#endif #endif
...@@ -260,20 +260,55 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super) ...@@ -260,20 +260,55 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
} }
EXPORT_SYMBOL(nd_btt_arena_is_valid); EXPORT_SYMBOL(nd_btt_arena_is_valid);
int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
struct btt_sb *btt_sb)
{
if (ndns->claim_class == NVDIMM_CCLASS_BTT2) {
/* Probe/setup for BTT v2.0 */
nd_btt->initial_offset = 0;
nd_btt->version_major = 2;
nd_btt->version_minor = 0;
if (nvdimm_read_bytes(ndns, 0, btt_sb, sizeof(*btt_sb), 0))
return -ENXIO;
if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
if ((le16_to_cpu(btt_sb->version_major) != 2) ||
(le16_to_cpu(btt_sb->version_minor) != 0))
return -ENODEV;
} else {
/*
* Probe/setup for BTT v1.1 (NVDIMM_CCLASS_NONE or
* NVDIMM_CCLASS_BTT)
*/
nd_btt->initial_offset = SZ_4K;
nd_btt->version_major = 1;
nd_btt->version_minor = 1;
if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
return -ENXIO;
if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
if ((le16_to_cpu(btt_sb->version_major) != 1) ||
(le16_to_cpu(btt_sb->version_minor) != 1))
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL(nd_btt_version);
static int __nd_btt_probe(struct nd_btt *nd_btt, static int __nd_btt_probe(struct nd_btt *nd_btt,
struct nd_namespace_common *ndns, struct btt_sb *btt_sb) struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
{ {
int rc;
if (!btt_sb || !ndns || !nd_btt) if (!btt_sb || !ndns || !nd_btt)
return -ENODEV; return -ENODEV;
if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
return -ENXIO;
if (nvdimm_namespace_capacity(ndns) < SZ_16M) if (nvdimm_namespace_capacity(ndns) < SZ_16M)
return -ENXIO; return -ENXIO;
if (!nd_btt_arena_is_valid(nd_btt, btt_sb)) rc = nd_btt_version(nd_btt, ndns, btt_sb);
return -ENODEV; if (rc < 0)
return rc;
nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize); nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL); nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL);
...@@ -295,6 +330,15 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns) ...@@ -295,6 +330,15 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
if (ndns->force_raw) if (ndns->force_raw)
return -ENODEV; return -ENODEV;
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
case NVDIMM_CCLASS_BTT:
case NVDIMM_CCLASS_BTT2:
break;
default:
return -ENODEV;
}
nvdimm_bus_lock(&ndns->dev); nvdimm_bus_lock(&ndns->dev);
btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nvdimm_bus_unlock(&ndns->dev); nvdimm_bus_unlock(&ndns->dev);
......
...@@ -38,13 +38,13 @@ static int to_nd_device_type(struct device *dev) ...@@ -38,13 +38,13 @@ static int to_nd_device_type(struct device *dev)
{ {
if (is_nvdimm(dev)) if (is_nvdimm(dev))
return ND_DEVICE_DIMM; return ND_DEVICE_DIMM;
else if (is_nd_pmem(dev)) else if (is_memory(dev))
return ND_DEVICE_REGION_PMEM; return ND_DEVICE_REGION_PMEM;
else if (is_nd_blk(dev)) else if (is_nd_blk(dev))
return ND_DEVICE_REGION_BLK; return ND_DEVICE_REGION_BLK;
else if (is_nd_dax(dev)) else if (is_nd_dax(dev))
return ND_DEVICE_DAX_PMEM; return ND_DEVICE_DAX_PMEM;
else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent)) else if (is_nd_region(dev->parent))
return nd_region_to_nstype(to_nd_region(dev->parent)); return nd_region_to_nstype(to_nd_region(dev->parent));
return 0; return 0;
...@@ -56,7 +56,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env) ...@@ -56,7 +56,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* Ensure that region devices always have their numa node set as * Ensure that region devices always have their numa node set as
* early as possible. * early as possible.
*/ */
if (is_nd_pmem(dev) || is_nd_blk(dev)) if (is_nd_region(dev))
set_dev_node(dev, to_nd_region(dev)->numa_node); set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT, return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev)); to_nd_device_type(dev));
...@@ -65,7 +65,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env) ...@@ -65,7 +65,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct module *to_bus_provider(struct device *dev) static struct module *to_bus_provider(struct device *dev)
{ {
/* pin bus providers while regions are enabled */ /* pin bus providers while regions are enabled */
if (is_nd_pmem(dev) || is_nd_blk(dev)) { if (is_nd_region(dev)) {
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
return nvdimm_bus->nd_desc->module; return nvdimm_bus->nd_desc->module;
...@@ -198,6 +198,9 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data) ...@@ -198,6 +198,9 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
sector = (ctx->phys - nd_region->ndr_start) / 512; sector = (ctx->phys - nd_region->ndr_start) / 512;
badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512); badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
if (nd_region->bb_state)
sysfs_notify_dirent(nd_region->bb_state);
return 0; return 0;
} }
...@@ -907,6 +910,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, ...@@ -907,6 +910,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
static char in_env[ND_CMD_MAX_ENVELOPE]; static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL; const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd); unsigned int cmd = _IOC_NR(ioctl_cmd);
unsigned int func = cmd;
void __user *p = (void __user *) arg; void __user *p = (void __user *) arg;
struct device *dev = &nvdimm_bus->dev; struct device *dev = &nvdimm_bus->dev;
struct nd_cmd_pkg pkg; struct nd_cmd_pkg pkg;
...@@ -972,6 +976,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, ...@@ -972,6 +976,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
} }
if (cmd == ND_CMD_CALL) { if (cmd == ND_CMD_CALL) {
func = pkg.nd_command;
dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n", dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
__func__, dimm_name, pkg.nd_command, __func__, dimm_name, pkg.nd_command,
in_len, out_len, buf_len); in_len, out_len, buf_len);
...@@ -1020,7 +1025,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, ...@@ -1020,7 +1025,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
} }
nvdimm_bus_lock(&nvdimm_bus->dev); nvdimm_bus_lock(&nvdimm_bus->dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd, buf); rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc) if (rc)
goto out_unlock; goto out_unlock;
......
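
The sysfs_notify_dirent() call added above is what makes the region and pmem
'badblocks' attributes poll(2) capable. A userspace consumer could wait for
updates roughly like this (the device path is illustrative):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/sys/block/pmem0/badblocks", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
	ssize_t n;

	if (fd < 0)
		return 1;
	/* read once to arm the notification, then block in poll() */
	pread(fd, buf, sizeof(buf), 0);
	while (poll(&pfd, 1, -1) > 0) {
		n = pread(fd, buf, sizeof(buf), 0);
		if (n > 0)
			fwrite(buf, 1, n, stdout);	/* updated bad-block list */
	}
	close(fd);
	return 0;
}
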
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
*/ */
#include <linux/device.h> #include <linux/device.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/pmem.h>
#include "nd-core.h" #include "nd-core.h"
#include "pmem.h"
#include "pfn.h" #include "pfn.h"
#include "btt.h" #include "btt.h"
#include "nd.h" #include "nd.h"
...@@ -184,6 +184,35 @@ ssize_t nd_namespace_store(struct device *dev, ...@@ -184,6 +184,35 @@ ssize_t nd_namespace_store(struct device *dev,
} }
ndns = to_ndns(found); ndns = to_ndns(found);
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
break;
case NVDIMM_CCLASS_BTT:
case NVDIMM_CCLASS_BTT2:
if (!is_nd_btt(dev)) {
len = -EBUSY;
goto out_attach;
}
break;
case NVDIMM_CCLASS_PFN:
if (!is_nd_pfn(dev)) {
len = -EBUSY;
goto out_attach;
}
break;
case NVDIMM_CCLASS_DAX:
if (!is_nd_dax(dev)) {
len = -EBUSY;
goto out_attach;
}
break;
default:
len = -EBUSY;
goto out_attach;
break;
}
if (__nvdimm_namespace_capacity(ndns) < SZ_16M) { if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
dev_dbg(dev, "%s too small to host\n", name); dev_dbg(dev, "%s too small to host\n", name);
len = -ENXIO; len = -ENXIO;
...@@ -260,8 +289,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, ...@@ -260,8 +289,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
* work around this collision. * work around this collision.
*/ */
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
&& !(flags & NVDIMM_IO_ATOMIC) && !(flags & NVDIMM_IO_ATOMIC)) {
&& !ndns->claim) {
long cleared; long cleared;
cleared = nvdimm_clear_poison(&ndns->dev, cleared = nvdimm_clear_poison(&ndns->dev,
...@@ -272,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, ...@@ -272,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
cleared /= 512; cleared /= 512;
badblocks_clear(&nsio->bb, sector, cleared); badblocks_clear(&nsio->bb, sector, cleared);
} }
invalidate_pmem(nsio->addr + offset, size); arch_invalidate_pmem(nsio->addr + offset, size);
} else } else
rc = -EIO; rc = -EIO;
} }
memcpy_to_pmem(nsio->addr + offset, buf, size); memcpy_flushcache(nsio->addr + offset, buf, size);
nvdimm_flush(to_nd_region(ndns->dev.parent)); nvdimm_flush(to_nd_region(ndns->dev.parent));
return rc; return rc;
......
...@@ -504,7 +504,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region, ...@@ -504,7 +504,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct nvdimm_bus *nvdimm_bus; struct nvdimm_bus *nvdimm_bus;
struct list_head *poison_list; struct list_head *poison_list;
if (!is_nd_pmem(&nd_region->dev)) { if (!is_memory(&nd_region->dev)) {
dev_WARN_ONCE(&nd_region->dev, 1, dev_WARN_ONCE(&nd_region->dev, 1,
"%s only valid for pmem regions\n", __func__); "%s only valid for pmem regions\n", __func__);
return; return;
...@@ -699,6 +699,9 @@ static __init int libnvdimm_init(void) ...@@ -699,6 +699,9 @@ static __init int libnvdimm_init(void)
rc = nd_region_init(); rc = nd_region_init();
if (rc) if (rc)
goto err_region; goto err_region;
nd_label_init();
return 0; return 0;
err_region: err_region:
nvdimm_exit(); nvdimm_exit();
......
...@@ -89,7 +89,7 @@ struct device *nd_dax_create(struct nd_region *nd_region) ...@@ -89,7 +89,7 @@ struct device *nd_dax_create(struct nd_region *nd_region)
struct device *dev = NULL; struct device *dev = NULL;
struct nd_dax *nd_dax; struct nd_dax *nd_dax;
if (!is_nd_pmem(&nd_region->dev)) if (!is_memory(&nd_region->dev))
return NULL; return NULL;
nd_dax = nd_dax_alloc(nd_region); nd_dax = nd_dax_alloc(nd_region);
...@@ -111,6 +111,14 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) ...@@ -111,6 +111,14 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
if (ndns->force_raw) if (ndns->force_raw)
return -ENODEV; return -ENODEV;
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
case NVDIMM_CCLASS_DAX:
break;
default:
return -ENODEV;
}
nvdimm_bus_lock(&ndns->dev); nvdimm_bus_lock(&ndns->dev);
nd_dax = nd_dax_alloc(nd_region); nd_dax = nd_dax_alloc(nd_region);
nd_pfn = &nd_dax->nd_pfn; nd_pfn = &nd_dax->nd_pfn;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include "nd-core.h" #include "nd-core.h"
#include "label.h" #include "label.h"
#include "pmem.h"
#include "nd.h" #include "nd.h"
static DEFINE_IDA(dimm_ida); static DEFINE_IDA(dimm_ida);
...@@ -235,6 +236,13 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr) ...@@ -235,6 +236,13 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
} }
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm); EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
/* pmem mapping properties are private to libnvdimm */
return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping) struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{ {
struct nvdimm *nvdimm = nd_mapping->nvdimm; struct nvdimm *nvdimm = nd_mapping->nvdimm;
...@@ -411,7 +419,7 @@ int alias_dpa_busy(struct device *dev, void *data) ...@@ -411,7 +419,7 @@ int alias_dpa_busy(struct device *dev, void *data)
struct resource *res; struct resource *res;
int i; int i;
if (!is_nd_pmem(dev)) if (!is_memory(dev))
return 0; return 0;
nd_region = to_nd_region(dev); nd_region = to_nd_region(dev);
......
This diff is collapsed.
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/ndctl.h> #include <linux/ndctl.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/uuid.h>
#include <linux/io.h> #include <linux/io.h>
enum { enum {
...@@ -60,7 +61,8 @@ static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0"; ...@@ -60,7 +61,8 @@ static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
*/ */
struct nd_namespace_index { struct nd_namespace_index {
u8 sig[NSINDEX_SIG_LEN]; u8 sig[NSINDEX_SIG_LEN];
__le32 flags; u8 flags[3];
u8 labelsize;
__le32 seq; __le32 seq;
__le64 myoff; __le64 myoff;
__le64 mysize; __le64 mysize;
...@@ -98,9 +100,23 @@ struct nd_namespace_label { ...@@ -98,9 +100,23 @@ struct nd_namespace_label {
__le64 dpa; __le64 dpa;
__le64 rawsize; __le64 rawsize;
__le32 slot; __le32 slot;
__le32 unused; /*
* Accessing fields past this point should be gated by a
* namespace_label_has() check.
*/
u8 align;
u8 reserved[3];
guid_t type_guid;
guid_t abstraction_guid;
u8 reserved2[88];
__le64 checksum;
}; };
#define NVDIMM_BTT_GUID "8aed63a2-29a2-4c66-8b12-f05d15d3922a"
#define NVDIMM_BTT2_GUID "18633bfc-1735-4217-8ac9-17239282d3f8"
#define NVDIMM_PFN_GUID "266400ba-fb9f-4677-bcb0-968f11d0d225"
#define NVDIMM_DAX_GUID "97a86d9c-3cdd-4eda-986f-5068b4f80088"
/** /**
* struct nd_label_id - identifier string for dpa allocation * struct nd_label_id - identifier string for dpa allocation
* @id: "{blk|pmem}-<namespace uuid>" * @id: "{blk|pmem}-<namespace uuid>"
...@@ -131,6 +147,7 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n); ...@@ -131,6 +147,7 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd); u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot); bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
u32 nd_label_nfree(struct nvdimm_drvdata *ndd); u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
struct nd_region; struct nd_region;
struct nd_namespace_pmem; struct nd_namespace_pmem;
struct nd_namespace_blk; struct nd_namespace_blk;
......
This diff is collapsed.
...@@ -64,7 +64,16 @@ struct blk_alloc_info { ...@@ -64,7 +64,16 @@ struct blk_alloc_info {
bool is_nvdimm(struct device *dev); bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev); bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev); bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_volatile(dev);
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev); struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void); int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void); void nvdimm_bus_exit(void);
......
...@@ -42,7 +42,7 @@ struct nd_poison { ...@@ -42,7 +42,7 @@ struct nd_poison {
struct nvdimm_drvdata { struct nvdimm_drvdata {
struct device *dev; struct device *dev;
int nsindex_size; int nsindex_size, nslabel_size;
struct nd_cmd_get_config_size nsarea; struct nd_cmd_get_config_size nsarea;
void *data; void *data;
int ns_current, ns_next; int ns_current, ns_next;
...@@ -96,6 +96,12 @@ static inline struct nd_namespace_index *to_next_namespace_index( ...@@ -96,6 +96,12 @@ static inline struct nd_namespace_index *to_next_namespace_index(
return to_namespace_index(ndd, ndd->ns_next); return to_namespace_index(ndd, ndd->ns_next);
} }
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
#define namespace_label_has(ndd, field) \
(offsetof(struct nd_namespace_label, field) \
< sizeof_namespace_label(ndd))
#define nd_dbg_dpa(r, d, res, fmt, arg...) \ #define nd_dbg_dpa(r, d, res, fmt, arg...) \
dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \ dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \ (r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
...@@ -155,6 +161,7 @@ struct nd_region { ...@@ -155,6 +161,7 @@ struct nd_region {
u64 ndr_start; u64 ndr_start;
int id, num_lanes, ro, numa_node; int id, num_lanes, ro, numa_node;
void *provider_data; void *provider_data;
struct kernfs_node *bb_state;
struct badblocks bb; struct badblocks bb;
struct nd_interleave_set *nd_set; struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane; struct nd_percpu_lane __percpu *lane;
...@@ -188,6 +195,9 @@ struct nd_btt { ...@@ -188,6 +195,9 @@ struct nd_btt {
u64 size; u64 size;
u8 *uuid; u8 *uuid;
int id; int id;
int initial_offset;
u16 version_major;
u16 version_minor;
}; };
enum nd_pfn_mode { enum nd_pfn_mode {
...@@ -229,6 +239,7 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf, ...@@ -229,6 +239,7 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf,
unsigned long *current_lbasize, const unsigned long *supported); unsigned long *current_lbasize, const unsigned long *supported);
int __init nvdimm_init(void); int __init nvdimm_init(void);
int __init nd_region_init(void); int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void); void nvdimm_exit(void);
void nd_region_exit(void); void nd_region_exit(void);
struct nvdimm; struct nvdimm;
...@@ -330,7 +341,8 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region) ...@@ -330,7 +341,8 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region)
struct nd_region *to_nd_region(struct device *dev); struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region); int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err); int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region); u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev); void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev); void nvdimm_bus_unlock(struct device *dev);
...@@ -349,6 +361,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); ...@@ -349,6 +361,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt); int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name); char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region, void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res); struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM) #if IS_ENABLED(CONFIG_ND_CLAIM)
......
...@@ -331,7 +331,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region) ...@@ -331,7 +331,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
struct nd_pfn *nd_pfn; struct nd_pfn *nd_pfn;
struct device *dev; struct device *dev;
if (!is_nd_pmem(&nd_region->dev)) if (!is_memory(&nd_region->dev))
return NULL; return NULL;
nd_pfn = nd_pfn_alloc(nd_region); nd_pfn = nd_pfn_alloc(nd_region);
...@@ -354,7 +354,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) ...@@ -354,7 +354,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
if (!pfn_sb || !ndns) if (!pfn_sb || !ndns)
return -ENODEV; return -ENODEV;
if (!is_nd_pmem(nd_pfn->dev.parent)) if (!is_memory(nd_pfn->dev.parent))
return -ENODEV; return -ENODEV;
if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0)) if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
...@@ -471,6 +471,14 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) ...@@ -471,6 +471,14 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
if (ndns->force_raw) if (ndns->force_raw)
return -ENODEV; return -ENODEV;
switch (ndns->claim_class) {
case NVDIMM_CCLASS_NONE:
case NVDIMM_CCLASS_PFN:
break;
default:
return -ENODEV;
}
nvdimm_bus_lock(&ndns->dev); nvdimm_bus_lock(&ndns->dev);
nd_pfn = nd_pfn_alloc(nd_region); nd_pfn = nd_pfn_alloc(nd_region);
pfn_dev = nd_pfn_devinit(nd_pfn, ndns); pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/pmem.h> #include <linux/uio.h>
#include <linux/dax.h> #include <linux/dax.h>
#include <linux/nd.h> #include <linux/nd.h>
#include "pmem.h" #include "pmem.h"
...@@ -68,9 +68,11 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem, ...@@ -68,9 +68,11 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
(unsigned long long) sector, cleared, (unsigned long long) sector, cleared,
cleared > 1 ? "s" : ""); cleared > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared); badblocks_clear(&pmem->bb, sector, cleared);
if (pmem->bb_state)
sysfs_notify_dirent(pmem->bb_state);
} }
invalidate_pmem(pmem->virt_addr + offset, len); arch_invalidate_pmem(pmem->virt_addr + offset, len);
return rc; return rc;
} }
...@@ -80,7 +82,7 @@ static void write_pmem(void *pmem_addr, struct page *page, ...@@ -80,7 +82,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
{ {
void *mem = kmap_atomic(page); void *mem = kmap_atomic(page);
memcpy_to_pmem(pmem_addr, mem + off, len); memcpy_flushcache(pmem_addr, mem + off, len);
kunmap_atomic(mem); kunmap_atomic(mem);
} }
...@@ -235,8 +237,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, ...@@ -235,8 +237,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn); return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
} }
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter_flushcache(addr, bytes, i);
}
static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t size)
{
arch_wb_cache_pmem(addr, size);
}
static const struct dax_operations pmem_dax_ops = { static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access, .direct_access = pmem_dax_direct_access,
.copy_from_iter = pmem_copy_from_iter,
.flush = pmem_dax_flush,
};
static const struct attribute_group *pmem_attribute_groups[] = {
&dax_attribute_group,
NULL,
}; };
static void pmem_release_queue(void *q) static void pmem_release_queue(void *q)
...@@ -265,14 +286,15 @@ static int pmem_attach_disk(struct device *dev, ...@@ -265,14 +286,15 @@ static int pmem_attach_disk(struct device *dev,
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent); struct nd_region *nd_region = to_nd_region(dev->parent);
struct vmem_altmap __altmap, *altmap = NULL; struct vmem_altmap __altmap, *altmap = NULL;
int nid = dev_to_node(dev), fua, wbc;
struct resource *res = &nsio->res; struct resource *res = &nsio->res;
struct nd_pfn *nd_pfn = NULL; struct nd_pfn *nd_pfn = NULL;
struct dax_device *dax_dev; struct dax_device *dax_dev;
int nid = dev_to_node(dev);
struct nd_pfn_sb *pfn_sb; struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem; struct pmem_device *pmem;
struct resource pfn_res; struct resource pfn_res;
struct request_queue *q; struct request_queue *q;
struct device *gendev;
struct gendisk *disk; struct gendisk *disk;
void *addr; void *addr;
...@@ -294,8 +316,12 @@ static int pmem_attach_disk(struct device *dev, ...@@ -294,8 +316,12 @@ static int pmem_attach_disk(struct device *dev,
dev_set_drvdata(dev, pmem); dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start; pmem->phys_addr = res->start;
pmem->size = resource_size(res); pmem->size = resource_size(res);
if (nvdimm_has_flush(nd_region) < 0) fua = nvdimm_has_flush(nd_region);
if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
dev_warn(dev, "unable to guarantee persistence of writes\n"); dev_warn(dev, "unable to guarantee persistence of writes\n");
fua = 0;
}
wbc = nvdimm_has_cache(nd_region);
if (!devm_request_mem_region(dev, res->start, resource_size(res), if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) { dev_name(&ndns->dev))) {
...@@ -339,9 +365,10 @@ static int pmem_attach_disk(struct device *dev, ...@@ -339,9 +365,10 @@ static int pmem_attach_disk(struct device *dev,
return PTR_ERR(addr); return PTR_ERR(addr);
pmem->virt_addr = addr; pmem->virt_addr = addr;
blk_queue_write_cache(q, true, true); blk_queue_write_cache(q, wbc, fua);
blk_queue_make_request(q, pmem_make_request); blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_max_hw_sectors(q, UINT_MAX);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
...@@ -368,14 +395,23 @@ static int pmem_attach_disk(struct device *dev, ...@@ -368,14 +395,23 @@ static int pmem_attach_disk(struct device *dev,
put_disk(disk); put_disk(disk);
return -ENOMEM; return -ENOMEM;
} }
dax_write_cache(dax_dev, wbc);
pmem->dax_dev = dax_dev; pmem->dax_dev = dax_dev;
gendev = disk_to_dev(disk);
gendev->groups = pmem_attribute_groups;
device_add_disk(dev, disk); device_add_disk(dev, disk);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem)) if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM; return -ENOMEM;
revalidate_disk(disk); revalidate_disk(disk);
pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
"badblocks");
if (!pmem->bb_state)
dev_warn(dev, "'badblocks' notification disabled\n");
return 0; return 0;
} }
...@@ -407,8 +443,18 @@ static int nd_pmem_probe(struct device *dev) ...@@ -407,8 +443,18 @@ static int nd_pmem_probe(struct device *dev)
static int nd_pmem_remove(struct device *dev) static int nd_pmem_remove(struct device *dev)
{ {
struct pmem_device *pmem = dev_get_drvdata(dev);
if (is_nd_btt(dev)) if (is_nd_btt(dev))
nvdimm_namespace_detach_btt(to_nd_btt(dev)); nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
* Note, this assumes device_lock() context to not race
* nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
pmem->bb_state = NULL;
}
nvdimm_flush(to_nd_region(dev->parent)); nvdimm_flush(to_nd_region(dev->parent));
return 0; return 0;
...@@ -427,6 +473,7 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) ...@@ -427,6 +473,7 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct nd_namespace_io *nsio; struct nd_namespace_io *nsio;
struct resource res; struct resource res;
struct badblocks *bb; struct badblocks *bb;
struct kernfs_node *bb_state;
if (event != NVDIMM_REVALIDATE_POISON) if (event != NVDIMM_REVALIDATE_POISON)
return; return;
...@@ -438,11 +485,13 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) ...@@ -438,11 +485,13 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
nd_region = to_nd_region(ndns->dev.parent); nd_region = to_nd_region(ndns->dev.parent);
nsio = to_nd_namespace_io(&ndns->dev); nsio = to_nd_namespace_io(&ndns->dev);
bb = &nsio->bb; bb = &nsio->bb;
bb_state = NULL;
} else { } else {
struct pmem_device *pmem = dev_get_drvdata(dev); struct pmem_device *pmem = dev_get_drvdata(dev);
nd_region = to_region(pmem); nd_region = to_region(pmem);
bb = &pmem->bb; bb = &pmem->bb;
bb_state = pmem->bb_state;
if (is_nd_pfn(dev)) { if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev); struct nd_pfn *nd_pfn = to_nd_pfn(dev);
...@@ -462,6 +511,8 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) ...@@ -462,6 +511,8 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
res.start = nsio->res.start + offset; res.start = nsio->res.start + offset;
res.end = nsio->res.end - end_trunc; res.end = nsio->res.end - end_trunc;
nvdimm_badblocks_populate(nd_region, bb, &res); nvdimm_badblocks_populate(nd_region, bb, &res);
if (bb_state)
sysfs_notify_dirent(bb_state);
} }
MODULE_ALIAS("pmem"); MODULE_ALIAS("pmem");
......
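The queue setup in pmem_attach_disk() is spread across several hunks above; condensed into one place (a sketch only, with an invented helper name, not the driver's literal code), the new cache/FUA policy reads:

/*
 * Sketch: consolidate the fua/wbc decisions from the hunks above.
 * FUA is only advertised when the platform reports flush hints and the
 * architecture provides the flushcache copy primitives; the write-cache
 * flag tracks whether a CPU cache fronts the persistent media.
 */
static void pmem_setup_queue_cache(struct request_queue *q,
		struct nd_region *nd_region, struct device *dev)
{
	int fua = nvdimm_has_flush(nd_region);	/* < 0 means capability unknown */
	int wbc = nvdimm_has_cache(nd_region);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	blk_queue_write_cache(q, wbc, fua);
}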
...@@ -5,6 +5,20 @@ ...@@ -5,6 +5,20 @@
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/fs.h> #include <linux/fs.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
void arch_wb_cache_pmem(void *addr, size_t size);
void arch_invalidate_pmem(void *addr, size_t size);
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
}
#endif
/* this definition is in it's own header for tools/testing/nvdimm to consume */ /* this definition is in it's own header for tools/testing/nvdimm to consume */
struct pmem_device { struct pmem_device {
/* One contiguous memory region per device */ /* One contiguous memory region per device */
...@@ -17,6 +31,7 @@ struct pmem_device { ...@@ -17,6 +31,7 @@ struct pmem_device {
size_t size; size_t size;
/* trim size when namespace capacity has been section aligned */ /* trim size when namespace capacity has been section aligned */
u32 pfn_pad; u32 pfn_pad;
struct kernfs_node *bb_state;
struct badblocks bb; struct badblocks bb;
struct dax_device *dax_dev; struct dax_device *dax_dev;
struct gendisk *disk; struct gendisk *disk;
......
...@@ -58,10 +58,14 @@ static int nd_region_probe(struct device *dev) ...@@ -58,10 +58,14 @@ static int nd_region_probe(struct device *dev)
if (devm_init_badblocks(dev, &nd_region->bb)) if (devm_init_badblocks(dev, &nd_region->bb))
return -ENODEV; return -ENODEV;
nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
"badblocks");
if (!nd_region->bb_state)
dev_warn(&nd_region->dev,
"'badblocks' notification disabled\n");
ndr_res.start = nd_region->ndr_start; ndr_res.start = nd_region->ndr_start;
ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1; ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
nvdimm_badblocks_populate(nd_region, nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
&nd_region->bb, &ndr_res);
} }
nd_region->btt_seed = nd_btt_create(nd_region); nd_region->btt_seed = nd_btt_create(nd_region);
...@@ -105,6 +109,13 @@ static int nd_region_remove(struct device *dev) ...@@ -105,6 +109,13 @@ static int nd_region_remove(struct device *dev)
dev_set_drvdata(dev, NULL); dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev); nvdimm_bus_unlock(dev);
/*
* Note, this assumes device_lock() context to not race
* nd_region_notify()
*/
sysfs_put(nd_region->bb_state);
nd_region->bb_state = NULL;
return 0; return 0;
} }
...@@ -126,6 +137,8 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event) ...@@ -126,6 +137,8 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
nd_region->ndr_size - 1; nd_region->ndr_size - 1;
nvdimm_badblocks_populate(nd_region, nvdimm_badblocks_populate(nd_region,
&nd_region->bb, &res); &nd_region->bb, &res);
if (nd_region->bb_state)
sysfs_notify_dirent(nd_region->bb_state);
} }
} }
device_for_each_child(dev, &event, child_notify); device_for_each_child(dev, &event, child_notify);
......
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/hash.h> #include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h> #include <linux/sort.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/nd.h> #include <linux/nd.h>
...@@ -169,6 +168,11 @@ bool is_nd_blk(struct device *dev) ...@@ -169,6 +168,11 @@ bool is_nd_blk(struct device *dev)
return dev ? dev->type == &nd_blk_device_type : false; return dev ? dev->type == &nd_blk_device_type : false;
} }
bool is_nd_volatile(struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev) struct nd_region *to_nd_region(struct device *dev)
{ {
struct nd_region *nd_region = container_of(dev, struct nd_region, dev); struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
...@@ -215,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); ...@@ -215,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
*/ */
int nd_region_to_nstype(struct nd_region *nd_region) int nd_region_to_nstype(struct nd_region *nd_region)
{ {
if (is_nd_pmem(&nd_region->dev)) { if (is_memory(&nd_region->dev)) {
u16 i, alias; u16 i, alias;
for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) { for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
...@@ -243,7 +247,7 @@ static ssize_t size_show(struct device *dev, ...@@ -243,7 +247,7 @@ static ssize_t size_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev); struct nd_region *nd_region = to_nd_region(dev);
unsigned long long size = 0; unsigned long long size = 0;
if (is_nd_pmem(dev)) { if (is_memory(dev)) {
size = nd_region->ndr_size; size = nd_region->ndr_size;
} else if (nd_region->ndr_mappings == 1) { } else if (nd_region->ndr_mappings == 1) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0]; struct nd_mapping *nd_mapping = &nd_region->mapping[0];
...@@ -307,13 +311,41 @@ static ssize_t set_cookie_show(struct device *dev, ...@@ -307,13 +311,41 @@ static ssize_t set_cookie_show(struct device *dev,
{ {
struct nd_region *nd_region = to_nd_region(dev); struct nd_region *nd_region = to_nd_region(dev);
struct nd_interleave_set *nd_set = nd_region->nd_set; struct nd_interleave_set *nd_set = nd_region->nd_set;
ssize_t rc = 0;
if (is_nd_pmem(dev) && nd_set) if (is_memory(dev) && nd_set)
/* pass, should be precluded by region_visible */; /* pass, should be precluded by region_visible */;
else else
return -ENXIO; return -ENXIO;
return sprintf(buf, "%#llx\n", nd_set->cookie); /*
* The cookie to show depends on which specification of the
* labels we are using. If there are no labels then default to
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
if (ndd) {
struct nd_namespace_index *nsindex;
nsindex = to_namespace_index(ndd, ndd->ns_current);
rc = sprintf(buf, "%#llx\n",
nd_region_interleave_set_cookie(nd_region,
nsindex));
}
}
nvdimm_bus_unlock(dev);
device_unlock(dev);
if (rc)
return rc;
return sprintf(buf, "%#llx\n", nd_set->cookie1);
} }
static DEVICE_ATTR_RO(set_cookie); static DEVICE_ATTR_RO(set_cookie);
...@@ -335,7 +367,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) ...@@ -335,7 +367,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
if (!ndd) if (!ndd)
return 0; return 0;
if (is_nd_pmem(&nd_region->dev)) { if (is_memory(&nd_region->dev)) {
available += nd_pmem_available_dpa(nd_region, available += nd_pmem_available_dpa(nd_region,
nd_mapping, &overlap); nd_mapping, &overlap);
if (overlap > blk_max_overlap) { if (overlap > blk_max_overlap) {
...@@ -521,10 +553,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) ...@@ -521,10 +553,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
struct nd_interleave_set *nd_set = nd_region->nd_set; struct nd_interleave_set *nd_set = nd_region->nd_set;
int type = nd_region_to_nstype(nd_region); int type = nd_region_to_nstype(nd_region);
if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr) if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
return 0; return 0;
if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr) if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
return 0; return 0;
if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
...@@ -552,7 +584,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) ...@@ -552,7 +584,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
|| type == ND_DEVICE_NAMESPACE_BLK) || type == ND_DEVICE_NAMESPACE_BLK)
&& a == &dev_attr_available_size.attr) && a == &dev_attr_available_size.attr)
return a->mode; return a->mode;
else if (is_nd_pmem(dev) && nd_set) else if (is_memory(dev) && nd_set)
return a->mode; return a->mode;
return 0; return 0;
...@@ -564,13 +596,18 @@ struct attribute_group nd_region_attribute_group = { ...@@ -564,13 +596,18 @@ struct attribute_group nd_region_attribute_group = {
}; };
EXPORT_SYMBOL_GPL(nd_region_attribute_group); EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region) u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
struct nd_namespace_index *nsindex)
{ {
struct nd_interleave_set *nd_set = nd_region->nd_set; struct nd_interleave_set *nd_set = nd_region->nd_set;
if (nd_set) if (!nd_set)
return nd_set->cookie;
return 0; return 0;
if (nsindex && __le16_to_cpu(nsindex->major) == 1
&& __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
} }
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
...@@ -604,7 +641,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, ...@@ -604,7 +641,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
{ {
struct nd_region *nd_region; struct nd_region *nd_region;
if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) { if (!probe && is_nd_region(dev)) {
int i; int i;
nd_region = to_nd_region(dev); nd_region = to_nd_region(dev);
...@@ -622,12 +659,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, ...@@ -622,12 +659,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
if (ndd) if (ndd)
atomic_dec(&nvdimm->busy); atomic_dec(&nvdimm->busy);
} }
if (is_nd_pmem(dev))
return;
} }
if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent)) if (dev->parent && is_nd_region(dev->parent) && probe) {
&& probe) {
nd_region = to_nd_region(dev->parent); nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev); nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev) if (nd_region->ns_seed == dev)
...@@ -800,7 +833,7 @@ int nd_blk_region_init(struct nd_region *nd_region) ...@@ -800,7 +833,7 @@ int nd_blk_region_init(struct nd_region *nd_region)
return 0; return 0;
if (nd_region->ndr_mappings < 1) { if (nd_region->ndr_mappings < 1) {
dev_err(dev, "invalid BLK region\n"); dev_dbg(dev, "invalid BLK region\n");
return -ENXIO; return -ENXIO;
} }
...@@ -1015,8 +1048,8 @@ void nvdimm_flush(struct nd_region *nd_region) ...@@ -1015,8 +1048,8 @@ void nvdimm_flush(struct nd_region *nd_region)
* The first wmb() is needed to 'sfence' all previous writes * The first wmb() is needed to 'sfence' all previous writes
* such that they are architecturally visible for the platform * such that they are architecturally visible for the platform
* buffer flush. Note that we've already arranged for pmem * buffer flush. Note that we've already arranged for pmem
* writes to avoid the cache via arch_memcpy_to_pmem(). The * writes to avoid the cache via memcpy_flushcache(). The final
* final wmb() ensures ordering for the NVDIMM flush write. * wmb() ensures ordering for the NVDIMM flush write.
*/ */
wmb(); wmb();
for (i = 0; i < nd_region->ndr_mappings; i++) for (i = 0; i < nd_region->ndr_mappings; i++)
...@@ -1038,8 +1071,9 @@ int nvdimm_has_flush(struct nd_region *nd_region) ...@@ -1038,8 +1071,9 @@ int nvdimm_has_flush(struct nd_region *nd_region)
{ {
int i; int i;
/* no nvdimm == flushing capability unknown */ /* no nvdimm or pmem api == flushing capability unknown */
if (nd_region->ndr_mappings == 0) if (nd_region->ndr_mappings == 0
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO; return -ENXIO;
for (i = 0; i < nd_region->ndr_mappings; i++) { for (i = 0; i < nd_region->ndr_mappings; i++) {
...@@ -1059,6 +1093,12 @@ int nvdimm_has_flush(struct nd_region *nd_region) ...@@ -1059,6 +1093,12 @@ int nvdimm_has_flush(struct nd_region *nd_region)
} }
EXPORT_SYMBOL_GPL(nvdimm_has_flush); EXPORT_SYMBOL_GPL(nvdimm_has_flush);
int nvdimm_has_cache(struct nd_region *nd_region)
{
return is_nd_pmem(&nd_region->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
void __exit nd_region_devs_exit(void) void __exit nd_region_devs_exit(void)
{ {
ida_destroy(&region_ida); ida_destroy(&region_ida);
......
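The reworked comment in nvdimm_flush() describes the ordering contract for pmem writes; a minimal illustration of the pattern a producer follows (hypothetical helper, assuming the destination was mapped write-back):

/*
 * Hypothetical helper: move data with memcpy_flushcache() so no dirty
 * cachelines are left behind, then nvdimm_flush() fences and writes the
 * region's flush hints to push any platform write buffers to media.
 */
static void pmem_persist_range(struct nd_region *nd_region,
		void *pmem_dst, const void *src, size_t len)
{
	memcpy_flushcache(pmem_dst, src, len);
	nvdimm_flush(nd_region);
}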
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h> #include <linux/dax.h>
#include <asm/extmem.h> #include <asm/extmem.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -43,8 +44,15 @@ static const struct block_device_operations dcssblk_devops = { ...@@ -43,8 +44,15 @@ static const struct block_device_operations dcssblk_devops = {
.release = dcssblk_release, .release = dcssblk_release,
}; };
static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter(addr, bytes, i);
}
static const struct dax_operations dcssblk_dax_ops = { static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access, .direct_access = dcssblk_dax_direct_access,
.copy_from_iter = dcssblk_dax_copy_from_iter,
}; };
struct dcssblk_dev_info { struct dcssblk_dev_info {
......
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/pagevec.h> #include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#include <linux/uio.h> #include <linux/uio.h>
...@@ -784,7 +783,7 @@ static int dax_writeback_one(struct block_device *bdev, ...@@ -784,7 +783,7 @@ static int dax_writeback_one(struct block_device *bdev,
} }
dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn)); dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
wb_cache_pmem(kaddr, size); dax_flush(dax_dev, pgoff, kaddr, size);
/* /*
* After we have flushed the cache, we can clear the dirty tag. There * After we have flushed the cache, we can clear the dirty tag. There
* cannot be new dirty data in the pfn after the flush has completed as * cannot be new dirty data in the pfn after the flush has completed as
...@@ -976,7 +975,8 @@ int __dax_zero_page_range(struct block_device *bdev, ...@@ -976,7 +975,8 @@ int __dax_zero_page_range(struct block_device *bdev,
dax_read_unlock(id); dax_read_unlock(id);
return rc; return rc;
} }
clear_pmem(kaddr + offset, size); memset(kaddr + offset, 0, size);
dax_flush(dax_dev, pgoff, kaddr + offset, size);
dax_read_unlock(id); dax_read_unlock(id);
} }
return 0; return 0;
...@@ -1055,7 +1055,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, ...@@ -1055,7 +1055,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
map_len = end - pos; map_len = end - pos;
if (iov_iter_rw(iter) == WRITE) if (iov_iter_rw(iter) == WRITE)
map_len = copy_from_iter_pmem(kaddr, map_len, iter); map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
else else
map_len = copy_to_iter(kaddr, map_len, iter); map_len = copy_to_iter(kaddr, map_len, iter);
if (map_len <= 0) { if (map_len <= 0) {
......
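fs/dax.c now funnels writes and cache maintenance through dax_copy_from_iter() and dax_flush() instead of the pmem-specific helpers. The dispatch side lives in drivers/dax/super.c, which is not part of the hunks shown here; a plausible sketch of its shape (exact guards may differ):

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;
	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t size)
{
	if (!dax_alive(dax_dev))
		return;
	if (dax_dev->ops->flush)
		dax_dev->ops->flush(dax_dev, pgoff, addr, size);
}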
...@@ -16,8 +16,15 @@ struct dax_operations { ...@@ -16,8 +16,15 @@ struct dax_operations {
*/ */
long (*direct_access)(struct dax_device *, pgoff_t, long, long (*direct_access)(struct dax_device *, pgoff_t, long,
void **, pfn_t *); void **, pfn_t *);
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
/* flush: optional driver-specific cache management after writes */
void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
}; };
extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX) #if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host); struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev); void put_dax(struct dax_device *dax_dev);
...@@ -75,6 +82,11 @@ void kill_dax(struct dax_device *dax_dev); ...@@ -75,6 +82,11 @@ void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn); void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
/* /*
* We use lowest available bit in exceptional entry for locking, one bit for * We use lowest available bit in exceptional entry for locking, one bit for
......
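Per the struct comments above, ->copy_from_iter is required for fs-dax direct-i/o while ->flush is optional. A minimal provider with no cache management to do (hypothetical 'foo' driver; compare the dcssblk and pmem hunks earlier) would look like:

static size_t foo_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	/* plain cached copy; persistence is not this device's concern */
	return copy_from_iter(addr, bytes, i);
}

static const struct dax_operations foo_dax_ops = {
	.direct_access	= foo_dax_direct_access,	/* assumed defined elsewhere */
	.copy_from_iter	= foo_dax_copy_from_iter,
	/* .flush left NULL: no driver-specific write-back needed */
};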
...@@ -132,6 +132,10 @@ typedef int (*dm_busy_fn) (struct dm_target *ti); ...@@ -132,6 +132,10 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/ */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn); long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
size_t size);
#define PAGE_SECTORS (PAGE_SIZE / 512) #define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message); void dm_error(const char *message);
...@@ -181,6 +185,8 @@ struct target_type { ...@@ -181,6 +185,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices; dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints; dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access; dm_dax_direct_access_fn direct_access;
dm_dax_copy_from_iter_fn dax_copy_from_iter;
dm_dax_flush_fn dax_flush;
/* For internal device-mapper use. */ /* For internal device-mapper use. */
struct list_head list; struct list_head list;
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/uuid.h>
enum { enum {
/* when a dimm supports both PMEM and BLK access a label is required */ /* when a dimm supports both PMEM and BLK access a label is required */
...@@ -54,6 +55,7 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, ...@@ -54,6 +55,7 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm_bus_descriptor { struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups; const struct attribute_group **attr_groups;
unsigned long bus_dsm_mask;
unsigned long cmd_mask; unsigned long cmd_mask;
struct module *module; struct module *module;
char *provider_name; char *provider_name;
...@@ -71,9 +73,14 @@ struct nd_cmd_desc { ...@@ -71,9 +73,14 @@ struct nd_cmd_desc {
}; };
struct nd_interleave_set { struct nd_interleave_set {
u64 cookie; /* v1.1 definition of the interleave-set-cookie algorithm */
u64 cookie1;
/* v1.2 definition of the interleave-set-cookie algorithm */
u64 cookie2;
/* compatibility with initial buggy Linux implementation */ /* compatibility with initial buggy Linux implementation */
u64 altcookie; u64 altcookie;
guid_t type_guid;
}; };
struct nd_mapping_desc { struct nd_mapping_desc {
...@@ -159,9 +166,11 @@ void *nd_region_provider_data(struct nd_region *nd_region); ...@@ -159,9 +166,11 @@ void *nd_region_provider_data(struct nd_region *nd_region);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region); unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le); u64 nd_fletcher64(void *addr, size_t len, bool le);
void nvdimm_flush(struct nd_region *nd_region); void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
#endif /* __LIBNVDIMM_H__ */ #endif /* __LIBNVDIMM_H__ */
...@@ -21,6 +21,15 @@ enum nvdimm_event { ...@@ -21,6 +21,15 @@ enum nvdimm_event {
NVDIMM_REVALIDATE_POISON, NVDIMM_REVALIDATE_POISON,
}; };
enum nvdimm_claim_class {
NVDIMM_CCLASS_NONE,
NVDIMM_CCLASS_BTT,
NVDIMM_CCLASS_BTT2,
NVDIMM_CCLASS_PFN,
NVDIMM_CCLASS_DAX,
NVDIMM_CCLASS_UNKNOWN,
};
struct nd_device_driver { struct nd_device_driver {
struct device_driver drv; struct device_driver drv;
unsigned long type; unsigned long type;
...@@ -41,12 +50,14 @@ static inline struct nd_device_driver *to_nd_device_driver( ...@@ -41,12 +50,14 @@ static inline struct nd_device_driver *to_nd_device_driver(
* @force_raw: ignore other personalities for the namespace (e.g. btt) * @force_raw: ignore other personalities for the namespace (e.g. btt)
* @dev: device model node * @dev: device model node
* @claim: when set, another personality has taken ownership of the namespace * @claim: when set, another personality has taken ownership of the namespace
* @claim_class: restrict claim type to a given class
* @rw_bytes: access the raw namespace capacity with byte-aligned transfers * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
*/ */
struct nd_namespace_common { struct nd_namespace_common {
int force_raw; int force_raw;
struct device dev; struct device dev;
struct device *claim; struct device *claim;
enum nvdimm_claim_class claim_class;
int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset, int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
void *buf, size_t size, int rw, unsigned long flags); void *buf, size_t size, int rw, unsigned long flags);
}; };
...@@ -75,12 +86,14 @@ struct nd_namespace_io { ...@@ -75,12 +86,14 @@ struct nd_namespace_io {
/** /**
* struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
* @nsio: device and system physical address range to drive * @nsio: device and system physical address range to drive
* @lbasize: logical sector size for the namespace in block-device-mode
* @alt_name: namespace name supplied in the dimm label * @alt_name: namespace name supplied in the dimm label
* @uuid: namespace uuid supplied in the dimm label * @uuid: namespace uuid supplied in the dimm label
* @id: ida allocated id * @id: ida allocated id
*/ */
struct nd_namespace_pmem { struct nd_namespace_pmem {
struct nd_namespace_io nsio; struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name; char *alt_name;
u8 *uuid; u8 *uuid;
int id; int id;
......
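The new claim_class field lets a namespace restrict which personality may claim it; nd_pfn_probe() earlier in this diff filters on it, and each personality is expected to do the same. A hypothetical BTT-side filter (helper name invented for illustration):

static int btt_claim_class_ok(struct nd_namespace_common *ndns)
{
	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_BTT:
	case NVDIMM_CCLASS_BTT2:	/* UEFI 2.7 BTT layout */
		return 0;
	default:
		return -ENODEV;
	}
}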
/*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __PMEM_H__
#define __PMEM_H__
#include <linux/io.h>
#include <linux/uio.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
#include <asm/pmem.h>
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
/*
* These are simply here to enable compilation; all call sites gate
* calls to these symbols with arch_has_pmem_api() and redirect to the
* implementation in asm/pmem.h.
*/
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
BUG();
}
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
BUG();
return 0;
}
static inline void arch_clear_pmem(void *addr, size_t size)
{
BUG();
}
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
BUG();
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
BUG();
}
#endif
static inline bool arch_has_pmem_api(void)
{
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
/**
* memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
* @src: source buffer for the copy
* @n: length of the copy in bytes
*
* Perform a memory copy that results in the destination of the copy
* being effectively evicted from, or never written to, the processor
* cache hierarchy after the copy completes. After memcpy_to_pmem()
* data may still reside in cpu or platform buffers, so this operation
* must be followed by a blkdev_issue_flush() on the pmem block device.
*/
static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
{
if (arch_has_pmem_api())
arch_memcpy_to_pmem(dst, src, n);
else
memcpy(dst, src, n);
}
/**
* copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
* See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
if (arch_has_pmem_api())
return arch_copy_from_iter_pmem(addr, bytes, i);
return copy_from_iter_nocache(addr, bytes, i);
}
/**
* clear_pmem - zero a PMEM memory range
* @addr: virtual start address
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
* See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline void clear_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_clear_pmem(addr, size);
else
memset(addr, 0, size);
}
/**
* invalidate_pmem - flush a pmem range from the cache hierarchy
* @addr: virtual start address
* @size: bytes to invalidate (internally aligned to cache line size)
*
* For platforms that support clearing poison this flushes any poisoned
* ranges out of the cache
*/
static inline void invalidate_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_invalidate_pmem(addr, size);
}
/**
* wb_cache_pmem - write back processor cache for PMEM memory range
* @addr: virtual start address
* @size: number of bytes to write back
*
* Write back the processor cache range starting at 'addr' for 'size' bytes.
* See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline void wb_cache_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_wb_cache_pmem(addr, size);
}
#endif /* __PMEM_H__ */
...@@ -122,6 +122,12 @@ static inline __must_check int memcpy_mcsafe(void *dst, const void *src, ...@@ -122,6 +122,12 @@ static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
return 0; return 0;
} }
#endif #endif
#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
#endif
void *memchr_inv(const void *s, int c, size_t n); void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new); char *strreplace(char *s, char old, char new);
......
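memcpy_flushcache() above degrades to a plain memcpy() unless the architecture overrides it. The opt-in is roughly the following two lines in the arch string header (x86 provides them as part of this series; shown here only as a sketch):

#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);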
...@@ -95,6 +95,21 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); ...@@ -95,6 +95,21 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
* copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
struct iov_iter *i)
{
return copy_from_iter_nocache(addr, bytes, i);
}
#endif
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
size_t iov_iter_zero(size_t bytes, struct iov_iter *); size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_alignment(const struct iov_iter *i);
......
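As the comment above notes, callers that need the stricter flushcache semantic must check the config symbol rather than assume it; the pmem_attach_disk() hunk earlier does exactly this. A standalone illustration (hypothetical helper):

static size_t copy_iter_persistent(void *dst, size_t bytes, struct iov_iter *i)
{
	/* only trust the flushing semantic when the arch provides it */
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
		pr_warn_once("flushcache unavailable, writes may remain cached\n");
	return copy_from_iter_flushcache(dst, bytes, i);
}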
...@@ -105,7 +105,8 @@ struct nd_cmd_ars_cap { ...@@ -105,7 +105,8 @@ struct nd_cmd_ars_cap {
__u32 status; __u32 status;
__u32 max_ars_out; __u32 max_ars_out;
__u32 clear_err_unit; __u32 clear_err_unit;
__u32 reserved; __u16 flags;
__u16 reserved;
} __packed; } __packed;
struct nd_cmd_ars_start { struct nd_cmd_ars_start {
...@@ -144,6 +145,43 @@ struct nd_cmd_clear_error { ...@@ -144,6 +145,43 @@ struct nd_cmd_clear_error {
__u64 cleared; __u64 cleared;
} __packed; } __packed;
struct nd_cmd_trans_spa {
__u64 spa;
__u32 status;
__u8 flags;
__u8 _reserved[3];
__u64 trans_length;
__u32 num_nvdimms;
struct nd_nvdimm_device {
__u32 nfit_device_handle;
__u32 _reserved;
__u64 dpa;
} __packed devices[0];
} __packed;
struct nd_cmd_ars_err_inj {
__u64 err_inj_spa_range_base;
__u64 err_inj_spa_range_length;
__u8 err_inj_options;
__u32 status;
} __packed;
struct nd_cmd_ars_err_inj_clr {
__u64 err_inj_clr_spa_range_base;
__u64 err_inj_clr_spa_range_length;
__u32 status;
} __packed;
struct nd_cmd_ars_err_inj_stat {
__u32 status;
__u32 inj_err_rec_count;
struct nd_error_stat_query_record {
__u64 err_inj_stat_spa_range_base;
__u64 err_inj_stat_spa_range_length;
} __packed record[0];
} __packed;
enum { enum {
ND_CMD_IMPLEMENTED = 0, ND_CMD_IMPLEMENTED = 0,
...@@ -169,6 +207,7 @@ enum { ...@@ -169,6 +207,7 @@ enum {
enum { enum {
ND_ARS_VOLATILE = 1, ND_ARS_VOLATILE = 1,
ND_ARS_PERSISTENT = 2, ND_ARS_PERSISTENT = 2,
ND_ARS_RETURN_PREV_DATA = 1 << 1,
ND_CONFIG_LOCKED = 1, ND_CONFIG_LOCKED = 1,
}; };
...@@ -179,6 +218,7 @@ static inline const char *nvdimm_bus_cmd_name(unsigned cmd) ...@@ -179,6 +218,7 @@ static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
[ND_CMD_ARS_START] = "ars_start", [ND_CMD_ARS_START] = "ars_start",
[ND_CMD_ARS_STATUS] = "ars_status", [ND_CMD_ARS_STATUS] = "ars_status",
[ND_CMD_CLEAR_ERROR] = "clear_error", [ND_CMD_CLEAR_ERROR] = "clear_error",
[ND_CMD_CALL] = "cmd_call",
}; };
if (cmd < ARRAY_SIZE(names) && names[cmd]) if (cmd < ARRAY_SIZE(names) && names[cmd])
......
...@@ -556,6 +556,9 @@ config ARCH_HAS_SG_CHAIN ...@@ -556,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API config ARCH_HAS_PMEM_API
bool bool
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
config ARCH_HAS_MMIO_FLUSH config ARCH_HAS_MMIO_FLUSH
bool bool
......
...@@ -615,6 +615,28 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) ...@@ -615,6 +615,28 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
} }
EXPORT_SYMBOL(copy_from_iter_nocache); EXPORT_SYMBOL(copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, v,
__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
v.iov_len)
)
return bytes;
}
EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
#endif
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{ {
char *to = addr; char *to = addr;
......
...@@ -1943,7 +1943,7 @@ static __init int nfit_test_init(void) ...@@ -1943,7 +1943,7 @@ static __init int nfit_test_init(void)
nfit_test->setup = nfit_test0_setup; nfit_test->setup = nfit_test0_setup;
break; break;
case 1: case 1:
nfit_test->num_pm = 1; nfit_test->num_pm = 2;
nfit_test->dcr_idx = NUM_DCR; nfit_test->dcr_idx = NUM_DCR;
nfit_test->num_dcr = 2; nfit_test->num_dcr = 2;
nfit_test->alloc = nfit_test1_alloc; nfit_test->alloc = nfit_test1_alloc;
......