Commit 9d92573f authored by Dan Williams

Merge branch 'for-4.13/dax' into libnvdimm-for-next

parents 2de5148f 0b277961
...@@ -7592,9 +7592,7 @@ M: Ross Zwisler <ross.zwisler@linux.intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/pmem.c
F: drivers/nvdimm/pmem*
F: include/linux/pmem.h
F: arch/*/include/asm/pmem.h
LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <mb@lightnvm.io>
......
...@@ -45,6 +45,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <asm/page.h>
#include <asm/prom.h>
...@@ -163,8 +164,15 @@ axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pa
return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn);
}
static size_t axon_ram_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter(addr, bytes, i);
}
static const struct dax_operations axon_ram_dax_ops = {
.direct_access = axon_ram_dax_direct_access,
.copy_from_iter = axon_ram_copy_from_iter,
};
/**
......
...@@ -54,6 +54,7 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
......
/*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
* arch_memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
* @src: source buffer for the copy
* @n: length of the copy in bytes
*
* Copy data to persistent memory media via non-temporal stores so that
* a subsequent pmem driver flush operation will drain posted write queues.
*/
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
int rem;
/*
* We are copying between two kernel buffers, if
* __copy_from_user_inatomic_nocache() returns an error (page
* fault) we would have already reported a general protection fault
* before the WARN+BUG.
*/
rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
__func__, dst, src, rem))
BUG();
}
/**
* arch_wb_cache_pmem - write back a cache range with CLWB
* @vaddr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
* instruction. Note that @size is internally rounded up to be cache
* line size aligned.
*/
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
unsigned long clflush_mask = x86_clflush_size - 1;
void *vend = addr + size;
void *p;
for (p = (void *)((unsigned long)addr & ~clflush_mask);
p < vend; p += x86_clflush_size)
clwb(p);
}
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
*/
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
size_t len;
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(addr, bytes, i);
/*
* In the iovec case on x86_64 copy_from_iter_nocache() uses
* non-temporal stores for the bulk of the transfer, but we need
* to manually flush if the transfer is unaligned. A cached
* memory copy is used when destination or size is not naturally
* aligned. That is:
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*
* In the non-iovec case the entire destination needs to be
* flushed.
*/
if (iter_is_iovec(i)) {
unsigned long flushed, dest = (unsigned long) addr;
if (bytes < 8) {
if (!IS_ALIGNED(dest, 4) || (bytes != 4))
arch_wb_cache_pmem(addr, bytes);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
arch_wb_cache_pmem(addr, 1);
}
flushed = dest - (unsigned long) addr;
if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
arch_wb_cache_pmem(addr + bytes - 1, 1);
}
} else
arch_wb_cache_pmem(addr, bytes);
return len;
}
/**
* arch_clear_pmem - zero a PMEM memory range
* @addr: virtual start address
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
*/
static inline void arch_clear_pmem(void *addr, size_t size)
{
memset(addr, 0, size);
arch_wb_cache_pmem(addr, size);
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
clflush_cache_range(addr, size);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */
...@@ -109,6 +109,11 @@ memcpy_mcsafe(void *dst, const void *src, size_t cnt)
return 0;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_64_H */
...@@ -171,6 +171,10 @@ unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigne
extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len);
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size)
...@@ -179,6 +183,13 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
return __copy_user_nocache(dst, src, size, 0);
}
static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, src, size);
}
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);
......
...@@ -7,6 +7,7 @@
*/
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
/*
* Zero Userspace
...@@ -73,3 +74,136 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
clac();
return len;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* clean_cache_range - write back a cache range with CLWB
* @vaddr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
* instruction. Note that @size is internally rounded up to be cache
* line size aligned.
*/
static void clean_cache_range(void *addr, size_t size)
{
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
unsigned long clflush_mask = x86_clflush_size - 1;
void *vend = addr + size;
void *p;
for (p = (void *)((unsigned long)addr & ~clflush_mask);
p < vend; p += x86_clflush_size)
clwb(p);
}
void arch_wb_cache_pmem(void *addr, size_t size)
{
clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
unsigned long flushed, dest = (unsigned long) dst;
long rc = __copy_user_nocache(dst, src, size, 0);
/*
* __copy_user_nocache() uses non-temporal stores for the bulk
* of the transfer, but we need to manually flush if the
* transfer is unaligned. A cached memory copy is used when
* destination or size is not naturally aligned. That is:
* - Require 8-byte alignment when size is 8 bytes or larger.
* - Require 4-byte alignment when size is 4 bytes.
*/
if (size < 8) {
if (!IS_ALIGNED(dest, 4) || size != 4)
clean_cache_range(dst, 1);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
clean_cache_range(dst, 1);
}
flushed = dest - (unsigned long) dst;
if (size > flushed && !IS_ALIGNED(size - flushed, 8))
clean_cache_range(dst + size - 1, 1);
}
return rc;
}
void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
unsigned long dest = (unsigned long) _dst;
unsigned long source = (unsigned long) _src;
/* cache copy and flush to align dest */
if (!IS_ALIGNED(dest, 8)) {
unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
memcpy((void *) dest, (void *) source, len);
clean_cache_range((void *) dest, len);
dest += len;
source += len;
size -= len;
if (!size)
return;
}
/* 4x8 movnti loop */
while (size >= 32) {
asm("movq (%0), %%r8\n"
"movq 8(%0), %%r9\n"
"movq 16(%0), %%r10\n"
"movq 24(%0), %%r11\n"
"movnti %%r8, (%1)\n"
"movnti %%r9, 8(%1)\n"
"movnti %%r10, 16(%1)\n"
"movnti %%r11, 24(%1)\n"
:: "r" (source), "r" (dest)
: "memory", "r8", "r9", "r10", "r11");
dest += 32;
source += 32;
size -= 32;
}
/* 1x8 movnti loop */
while (size >= 8) {
asm("movq (%0), %%r8\n"
"movnti %%r8, (%1)\n"
:: "r" (source), "r" (dest)
: "memory", "r8");
dest += 8;
source += 8;
size -= 8;
}
/* 1x4 movnti loop */
while (size >= 4) {
asm("movl (%0), %%r8d\n"
"movnti %%r8d, (%1)\n"
:: "r" (source), "r" (dest)
: "memory", "r8");
dest += 4;
source += 4;
size -= 4;
}
/* cache copy for remaining bytes */
if (size) {
memcpy((void *) dest, (void *) source, size);
clean_cache_range((void *) dest, size);
}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len)
{
char *from = kmap_atomic(page);
memcpy_flushcache(to, from + offset, len);
kunmap_atomic(from);
}
#endif
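For context, a minimal usage sketch of the memcpy_flushcache() helper added above; the destination is assumed to be a pointer into a persistent-memory mapping obtained elsewhere, and the function and variable names are illustrative, not part of this commit:

/* Hedged sketch: copy a buffer into pmem with non-temporal/flushed stores. */
static void example_store_to_pmem(void *pmem_dst, const void *src, size_t len)
{
	/* data is pushed out of (or around) the CPU cache by memcpy_flushcache() */
	memcpy_flushcache(pmem_dst, src, len);
	/* ordering relative to a later flush-hint write is the caller's job,
	 * see the wmb() in nvdimm_flush() */
	wmb();
}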
...@@ -150,6 +150,12 @@ void clflush_cache_range(void *vaddr, unsigned int size)
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
void arch_invalidate_pmem(void *addr, size_t size)
{
clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
static void __cpa_flush_all(void *arg)
{
unsigned long cache = (unsigned long)arg;
......
...@@ -20,7 +20,6 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
...@@ -1928,8 +1927,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
}
if (rw)
memcpy_to_pmem(mmio->addr.aperture + offset,
memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
iobuf + copied, c);
else {
if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
mmio_flush_range((void __force *)
...@@ -2043,7 +2041,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
if (!mmio->addr.base) {
dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
nvdimm_name(nvdimm));
...@@ -2320,6 +2318,13 @@ static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
nfit_spa_type(spa) == NFIT_SPA_PCD);
}
static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
nfit_spa_type(spa) == NFIT_SPA_VCD ||
nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct nfit_spa *nfit_spa)
{
...@@ -2394,7 +2399,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
ndr_desc);
if (!nfit_spa->nd_region)
rc = -ENOMEM;
} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
} else if (nfit_spa_is_volatile(spa)) {
nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
ndr_desc);
if (!nfit_spa->nd_region)
......
...@@ -22,6 +22,7 @@
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include <linux/uio.h>
#endif
#include <linux/uaccess.h>
...@@ -354,8 +355,15 @@ static long brd_dax_direct_access(struct dax_device *dax_dev,
return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
}
static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter(addr, bytes, i);
}
static const struct dax_operations brd_dax_ops = {
.direct_access = brd_dax_direct_access,
.copy_from_iter = brd_dax_copy_from_iter,
};
#endif
......
...@@ -18,6 +18,7 @@
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
...@@ -115,13 +116,20 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
DAXDEV_ALIVE,
/* gate whether dax_flush() calls the low level flush routine */
DAXDEV_WRITE_CACHE,
};
/**
* struct dax_device - anchor object for dax services
* @inode: core vfs
* @cdev: optional character interface for "device dax"
* @host: optional name for lookups where the device path is not available
* @private: dax driver private data
* @alive: !alive + rcu grace period == no new operations / mappings
* @flags: state and boolean properties
*/
struct dax_device {
struct hlist_node list;
...@@ -129,10 +137,75 @@ struct dax_device {
struct cdev cdev;
const char *host;
void *private;
bool alive;
unsigned long flags;
const struct dax_operations *ops;
};
static ssize_t write_cache_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
ssize_t rc;
WARN_ON_ONCE(!dax_dev);
if (!dax_dev)
return -ENXIO;
rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
&dax_dev->flags));
put_dax(dax_dev);
return rc;
}
static ssize_t write_cache_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
bool write_cache;
int rc = strtobool(buf, &write_cache);
struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
WARN_ON_ONCE(!dax_dev);
if (!dax_dev)
return -ENXIO;
if (rc)
len = rc;
else if (write_cache)
set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
else
clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
put_dax(dax_dev);
return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
WARN_ON_ONCE(!dax_dev);
if (!dax_dev)
return 0;
if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
return 0;
return a->mode;
}
static struct attribute *dax_attributes[] = {
&dev_attr_write_cache.attr,
NULL,
};
struct attribute_group dax_attribute_group = {
.name = "dax",
.attrs = dax_attributes,
.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
/**
* dax_direct_access() - translate a device pgoff to an absolute pfn
* @dax_dev: a dax_device instance representing the logical memory range
...@@ -172,10 +245,43 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
}
EXPORT_SYMBOL_GPL(dax_direct_access);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
if (!dax_alive(dax_dev))
return 0;
return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size)
{
if (!dax_alive(dax_dev))
return;
if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
return;
if (dax_dev->ops->flush)
dax_dev->ops->flush(dax_dev, pgoff, addr, size);
}
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
if (wc)
set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
else
clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
return dax_dev->alive;
return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
...@@ -195,7 +301,7 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev)
return;
dax_dev->alive = false;
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
...@@ -235,7 +341,7 @@ static void dax_destroy_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
WARN_ONCE(dax_dev->alive,
WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
"kill_dax() must be called before final iput()\n");
call_rcu(&inode->i_rcu, dax_i_callback);
}
...@@ -287,7 +393,7 @@ static struct dax_device *dax_dev_get(dev_t devt)
dax_dev = to_dax_dev(inode);
if (inode->i_state & I_NEW) {
dax_dev->alive = true;
set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR;
inode->i_flags = S_DAX;
......
...@@ -159,6 +159,34 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
struct dax_device *dax_dev = lc->dev->dax_dev;
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
dev_sector = linear_map_sector(ti, sector);
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
size_t size)
{
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
struct dax_device *dax_dev = lc->dev->dax_dev;
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
dev_sector = linear_map_sector(ti, sector);
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
return;
dax_flush(dax_dev, pgoff, addr, size);
}
static struct target_type linear_target = {
.name = "linear",
.version = {1, 3, 0},
...@@ -171,6 +199,8 @@ static struct target_type linear_target = {
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
.dax_copy_from_iter = linear_dax_copy_from_iter,
.dax_flush = linear_dax_flush,
};
int __init dm_linear_init(void)
......
...@@ -332,6 +332,44 @@ static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
return 0;
return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
size_t size)
{
sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
struct dax_device *dax_dev;
struct block_device *bdev;
uint32_t stripe;
stripe_map_sector(sc, sector, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
return;
dax_flush(dax_dev, pgoff, addr, size);
}
/*
* Stripe status:
*
...@@ -451,6 +489,8 @@ static struct target_type stripe_target = {
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
.dax_copy_from_iter = stripe_dax_copy_from_iter,
.dax_flush = stripe_dax_flush,
};
int __init dm_stripe_init(void)
......
...@@ -19,6 +19,7 @@
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
...@@ -969,6 +970,48 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
long ret = 0;
int srcu_idx;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti)
goto out;
if (!ti->type->dax_copy_from_iter) {
ret = copy_from_iter(addr, bytes, i);
goto out;
}
ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
struct dm_target *ti;
int srcu_idx;
ti = dm_dax_get_live_target(md, sector, &srcu_idx);
if (!ti)
goto out;
if (ti->type->dax_flush)
ti->type->dax_flush(ti, pgoff, addr, size);
out:
dm_put_live_table(md, srcu_idx);
}
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH.
...@@ -2859,6 +2902,8 @@ static const struct block_device_operations dm_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
.copy_from_iter = dm_dax_copy_from_iter,
.flush = dm_dax_flush,
};
/*
......
...@@ -38,13 +38,13 @@ static int to_nd_device_type(struct device *dev)
{
if (is_nvdimm(dev))
return ND_DEVICE_DIMM;
else if (is_nd_pmem(dev))
else if (is_memory(dev))
return ND_DEVICE_REGION_PMEM;
else if (is_nd_blk(dev))
return ND_DEVICE_REGION_BLK;
else if (is_nd_dax(dev))
return ND_DEVICE_DAX_PMEM;
else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
else if (is_nd_region(dev->parent))
return nd_region_to_nstype(to_nd_region(dev->parent));
return 0;
...@@ -56,7 +56,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* Ensure that region devices always have their numa node set as
* early as possible.
*/
if (is_nd_pmem(dev) || is_nd_blk(dev))
if (is_nd_region(dev))
set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
...@@ -65,7 +65,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct module *to_bus_provider(struct device *dev)
{
/* pin bus providers while regions are enabled */
if (is_nd_pmem(dev) || is_nd_blk(dev)) {
if (is_nd_region(dev)) {
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
return nvdimm_bus->nd_desc->module;
......
...@@ -12,8 +12,8 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/pmem.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"
...@@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
cleared /= 512;
badblocks_clear(&nsio->bb, sector, cleared);
}
invalidate_pmem(nsio->addr + offset, size);
arch_invalidate_pmem(nsio->addr + offset, size);
} else
rc = -EIO;
}
memcpy_to_pmem(nsio->addr + offset, buf, size);
memcpy_flushcache(nsio->addr + offset, buf, size);
nvdimm_flush(to_nd_region(ndns->dev.parent));
return rc;
......
...@@ -504,7 +504,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct nvdimm_bus *nvdimm_bus;
struct list_head *poison_list;
if (!is_nd_pmem(&nd_region->dev)) {
if (!is_memory(&nd_region->dev)) {
dev_WARN_ONCE(&nd_region->dev, 1,
"%s only valid for pmem regions\n", __func__);
return;
......
...@@ -89,7 +89,7 @@ struct device *nd_dax_create(struct nd_region *nd_region)
struct device *dev = NULL;
struct nd_dax *nd_dax;
if (!is_nd_pmem(&nd_region->dev))
if (!is_memory(&nd_region->dev))
return NULL;
nd_dax = nd_dax_alloc(nd_region);
......
...@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"
static DEFINE_IDA(dimm_ida);
...@@ -235,6 +236,13 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
/* pmem mapping properties are private to libnvdimm */
return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
struct nvdimm *nvdimm = nd_mapping->nvdimm;
...@@ -411,7 +419,7 @@ int alias_dpa_busy(struct device *dev, void *data)
struct resource *res;
int i;
if (!is_nd_pmem(dev))
if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
......
...@@ -14,10 +14,10 @@
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
...@@ -112,7 +112,7 @@ static int is_uuid_busy(struct device *dev, void *data)
static int is_namespace_uuid_busy(struct device *dev, void *data)
{
if (is_nd_pmem(dev) || is_nd_blk(dev))
if (is_nd_region(dev))
return device_for_each_child(dev, data, is_uuid_busy);
return 0;
}
...@@ -155,11 +155,7 @@ bool pmem_should_map_pages(struct device *dev)
IORES_DESC_NONE) == REGION_MIXED)
return false;
#ifdef ARCH_MEMREMAP_PMEM
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);
...@@ -810,7 +806,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
struct nd_label_id label_id;
int i;
if (!is_nd_pmem(dev))
if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
...@@ -2057,7 +2053,7 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
struct resource *res;
struct device *dev;
if (!is_nd_pmem(&nd_region->dev))
if (!is_memory(&nd_region->dev))
return NULL;
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
...@@ -2360,7 +2356,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
}
dev->parent = &nd_region->dev;
devs[count++] = dev;
} else if (is_nd_pmem(&nd_region->dev)) {
} else if (is_memory(&nd_region->dev)) {
/* clean unselected labels */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct list_head *l, *e;
......
...@@ -64,7 +64,16 @@ struct blk_alloc_info {
bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_volatile(dev);
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
......
...@@ -331,7 +331,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
struct nd_pfn *nd_pfn;
struct device *dev;
if (!is_nd_pmem(&nd_region->dev))
if (!is_memory(&nd_region->dev))
return NULL;
nd_pfn = nd_pfn_alloc(nd_region);
...@@ -354,7 +354,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
if (!pfn_sb || !ndns)
return -ENODEV;
if (!is_nd_pmem(nd_pfn->dev.parent))
if (!is_memory(nd_pfn->dev.parent))
return -ENODEV;
if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
......
...@@ -28,7 +28,7 @@
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
...@@ -72,7 +72,7 @@ static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
sysfs_notify_dirent(pmem->bb_state);
}
invalidate_pmem(pmem->virt_addr + offset, len);
arch_invalidate_pmem(pmem->virt_addr + offset, len);
return rc;
}
...@@ -82,7 +82,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
{
void *mem = kmap_atomic(page);
memcpy_to_pmem(pmem_addr, mem + off, len);
memcpy_flushcache(pmem_addr, mem + off, len);
kunmap_atomic(mem);
}
...@@ -237,8 +237,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter_flushcache(addr, bytes, i);
}
static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t size)
{
arch_wb_cache_pmem(addr, size);
}
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
.copy_from_iter = pmem_copy_from_iter,
.flush = pmem_dax_flush,
};
static const struct attribute_group *pmem_attribute_groups[] = {
&dax_attribute_group,
NULL,
};
static void pmem_release_queue(void *q)
...@@ -267,14 +286,15 @@ static int pmem_attach_disk(struct device *dev,
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
struct vmem_altmap __altmap, *altmap = NULL;
int nid = dev_to_node(dev), fua, wbc;
struct resource *res = &nsio->res;
struct nd_pfn *nd_pfn = NULL;
struct dax_device *dax_dev;
int nid = dev_to_node(dev);
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
struct resource pfn_res;
struct request_queue *q;
struct device *gendev;
struct gendisk *disk;
void *addr;
...@@ -296,8 +316,12 @@ static int pmem_attach_disk(struct device *dev,
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
if (nvdimm_has_flush(nd_region) < 0)
fua = nvdimm_has_flush(nd_region);
if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
dev_warn(dev, "unable to guarantee persistence of writes\n");
fua = 0;
}
wbc = nvdimm_has_cache(nd_region);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) {
...@@ -341,7 +365,7 @@ static int pmem_attach_disk(struct device *dev,
return PTR_ERR(addr);
pmem->virt_addr = addr;
blk_queue_write_cache(q, true, true);
blk_queue_write_cache(q, wbc, fua);
blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
...@@ -372,8 +396,12 @@ static int pmem_attach_disk(struct device *dev,
put_disk(disk);
return -ENOMEM;
}
dax_write_cache(dax_dev, wbc);
pmem->dax_dev = dax_dev;
gendev = disk_to_dev(disk);
gendev->groups = pmem_attribute_groups;
device_add_disk(dev, disk);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
......
...@@ -5,6 +5,20 @@
#include <linux/pfn_t.h>
#include <linux/fs.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
void arch_wb_cache_pmem(void *addr, size_t size);
void arch_invalidate_pmem(void *addr, size_t size);
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
}
#endif
/* this definition is in it's own header for tools/testing/nvdimm to consume */
struct pmem_device {
/* One contiguous memory region per device */
......
...@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
...@@ -169,6 +168,11 @@ bool is_nd_blk(struct device *dev)
return dev ? dev->type == &nd_blk_device_type : false;
}
bool is_nd_volatile(struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
...@@ -215,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
*/
int nd_region_to_nstype(struct nd_region *nd_region)
{
if (is_nd_pmem(&nd_region->dev)) {
if (is_memory(&nd_region->dev)) {
u16 i, alias;
for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
...@@ -243,7 +247,7 @@ static ssize_t size_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long size = 0;
if (is_nd_pmem(dev)) {
if (is_memory(dev)) {
size = nd_region->ndr_size;
} else if (nd_region->ndr_mappings == 1) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
...@@ -309,7 +313,7 @@ static ssize_t set_cookie_show(struct device *dev,
struct nd_interleave_set *nd_set = nd_region->nd_set;
ssize_t rc = 0;
if (is_nd_pmem(dev) && nd_set)
if (is_memory(dev) && nd_set)
/* pass, should be precluded by region_visible */;
else
return -ENXIO;
...@@ -363,7 +367,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
if (!ndd)
return 0;
if (is_nd_pmem(&nd_region->dev)) {
if (is_memory(&nd_region->dev)) {
available += nd_pmem_available_dpa(nd_region,
nd_mapping, &overlap);
if (overlap > blk_max_overlap) {
...@@ -549,10 +553,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
struct nd_interleave_set *nd_set = nd_region->nd_set;
int type = nd_region_to_nstype(nd_region);
if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
return 0;
if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
return 0;
if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
...@@ -580,7 +584,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
|| type == ND_DEVICE_NAMESPACE_BLK)
&& a == &dev_attr_available_size.attr)
return a->mode;
else if (is_nd_pmem(dev) && nd_set)
else if (is_memory(dev) && nd_set)
return a->mode;
return 0;
...@@ -637,7 +641,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
{
struct nd_region *nd_region;
if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
if (!probe && is_nd_region(dev)) {
int i;
nd_region = to_nd_region(dev);
...@@ -655,12 +659,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
if (ndd)
atomic_dec(&nvdimm->busy);
}
if (is_nd_pmem(dev))
return;
}
if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
if (dev->parent && is_nd_region(dev->parent) && probe) {
&& probe) {
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev)
...@@ -1048,8 +1048,8 @@ void nvdimm_flush(struct nd_region *nd_region)
* The first wmb() is needed to 'sfence' all previous writes
* such that they are architecturally visible for the platform
* buffer flush. Note that we've already arranged for pmem
* writes to avoid the cache via arch_memcpy_to_pmem(). The
* writes to avoid the cache via memcpy_flushcache(). The final
* final wmb() ensures ordering for the NVDIMM flush write.
* wmb() ensures ordering for the NVDIMM flush write.
*/
wmb();
for (i = 0; i < nd_region->ndr_mappings; i++)
...@@ -1071,8 +1071,9 @@ int nvdimm_has_flush(struct nd_region *nd_region)
{
int i;
/* no nvdimm == flushing capability unknown */
/* no nvdimm or pmem api == flushing capability unknown */
if (nd_region->ndr_mappings == 0
|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
for (i = 0; i < nd_region->ndr_mappings; i++) {
...@@ -1092,6 +1093,12 @@ int nvdimm_has_flush(struct nd_region *nd_region)
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
int nvdimm_has_cache(struct nd_region *nd_region)
{
return is_nd_pmem(&nd_region->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
void __exit nd_region_devs_exit(void)
{
ida_destroy(&region_ida);
......
...@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <asm/extmem.h>
#include <asm/io.h>
...@@ -43,8 +44,15 @@ static const struct block_device_operations dcssblk_devops = {
.release = dcssblk_release,
};
static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
return copy_from_iter(addr, bytes, i);
}
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
.copy_from_iter = dcssblk_dax_copy_from_iter,
};
struct dcssblk_dev_info {
......
...@@ -25,7 +25,6 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
...@@ -784,7 +783,7 @@ static int dax_writeback_one(struct block_device *bdev,
}
dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
wb_cache_pmem(kaddr, size);
dax_flush(dax_dev, pgoff, kaddr, size);
/*
* After we have flushed the cache, we can clear the dirty tag. There
* cannot be new dirty data in the pfn after the flush has completed as
...@@ -975,7 +974,8 @@ int __dax_zero_page_range(struct block_device *bdev,
dax_read_unlock(id);
return rc;
}
clear_pmem(kaddr + offset, size);
memset(kaddr + offset, 0, size);
dax_flush(dax_dev, pgoff, kaddr + offset, size);
dax_read_unlock(id);
}
return 0;
...@@ -1054,7 +1054,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
map_len = end - pos;
if (iov_iter_rw(iter) == WRITE)
map_len = copy_from_iter_pmem(kaddr, map_len, iter);
map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
else
map_len = copy_to_iter(kaddr, map_len, iter);
if (map_len <= 0) {
......
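Taken together, the fs/dax.c hunks above route writes through the dax_device operations instead of the removed pmem API. A condensed, illustrative sketch of the resulting flow (single-page range assumed, locking and error handling omitted; not a verbatim excerpt of the kernel code):

/* Hedged sketch of the new fs-dax flow: copy via the driver op on write,
 * flush via dax_flush() on the fsync/writeback path. */
static void example_dax_write_and_clean(struct dax_device *dax_dev,
		pgoff_t pgoff, struct iov_iter *iter, size_t len)
{
	void *kaddr;
	pfn_t pfn;

	if (dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn) < 0)
		return;

	/* as in dax_iomap_actor(): the driver picks the copy primitive, e.g.
	 * copy_from_iter_flushcache() for pmem, plain copy_from_iter() for brd */
	len = dax_copy_from_iter(dax_dev, pgoff, kaddr, len, iter);

	/* as in dax_writeback_one(): cache writeback happens only when the
	 * device advertises a write cache and provides a ->flush op */
	dax_flush(dax_dev, pgoff, kaddr, len);
}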
...@@ -16,8 +16,15 @@ struct dax_operations {
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
void **, pfn_t *);
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
/* flush: optional driver-specific cache management after writes */
void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
};
extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev);
...@@ -75,6 +82,11 @@ void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
/*
* We use lowest available bit in exceptional entry for locking, one bit for
......
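For reference, a hedged sketch of the smallest provider of the extended dax_operations above, modeled on the brd and dcssblk hooks added earlier in this merge; the foo_* names are illustrative only, not part of this commit:

/* A volatile DAX provider must supply ->copy_from_iter but may omit
 * ->flush; dax_flush() then does nothing and the new write_cache sysfs
 * attribute stays hidden for the device. */
static size_t foo_dax_copy_from_iter(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	/* no persistence guarantee required, a cached copy suffices */
	return copy_from_iter(addr, bytes, i);
}

static const struct dax_operations foo_dax_ops = {
	.direct_access = foo_dax_direct_access, /* assumed to exist elsewhere */
	.copy_from_iter = foo_dax_copy_from_iter,
	/* .flush intentionally left unset */
};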
...@@ -132,6 +132,10 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
size_t size);
#define PAGE_SECTORS (PAGE_SIZE / 512) #define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message); void dm_error(const char *message);
...@@ -181,6 +185,8 @@ struct target_type { ...@@ -181,6 +185,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices; dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints; dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access; dm_dax_direct_access_fn direct_access;
dm_dax_copy_from_iter_fn dax_copy_from_iter;
dm_dax_flush_fn dax_flush;
/* For internal device-mapper use. */ /* For internal device-mapper use. */
struct list_head list; struct list_head list;
......
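Stacked drivers forward the new hooks to the dax_device of the underlying device. A sketch of what a pass-through target could do (linear_c and its dev member are assumed here; a real target such as dm-linear would also remap pgoff onto the underlying device before forwarding):

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;	/* assumed private layout */

	return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
}

static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
		size_t size)
{
	struct linear_c *lc = ti->private;

	dax_flush(lc->dev->dax_dev, pgoff, addr, size);
}

These would then be wired up as .dax_copy_from_iter and .dax_flush in the target_type shown above.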
...@@ -166,9 +166,11 @@ void *nd_region_provider_data(struct nd_region *nd_region); ...@@ -166,9 +166,11 @@ void *nd_region_provider_data(struct nd_region *nd_region);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region); unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le); u64 nd_fletcher64(void *addr, size_t len, bool le);
void nvdimm_flush(struct nd_region *nd_region); void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
#endif /* __LIBNVDIMM_H__ */ #endif /* __LIBNVDIMM_H__ */
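nvdimm_has_cache() reports whether the region's media sits behind the CPU cache; together with dax_write_cache() from the dax.h hunk it lets a driver tell the dax core whether cache writeback is needed at all. A hedged sketch of that wiring at attach time (the surrounding pmem attach code and variable names are assumed):

/* Sketch: propagate region cache topology to the dax core after alloc_dax(). */
static void example_set_write_cache(struct dax_device *dax_dev,
		struct nd_region *nd_region)
{
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region) != 0);
}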
/*
* Copyright(c) 2015 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __PMEM_H__
#define __PMEM_H__
#include <linux/io.h>
#include <linux/uio.h>
#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
#include <asm/pmem.h>
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
/*
* These are simply here to enable compilation, all call sites gate
* calling these symbols with arch_has_pmem_api() and redirect to the
* implementation in asm/pmem.h.
*/
static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
{
BUG();
}
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
BUG();
return 0;
}
static inline void arch_clear_pmem(void *addr, size_t size)
{
BUG();
}
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
BUG();
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
BUG();
}
#endif
static inline bool arch_has_pmem_api(void)
{
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
/**
* memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
* @src: source buffer for the copy
* @n: length of the copy in bytes
*
* Perform a memory copy that results in the destination of the copy
* being effectively evicted from, or never written to, the processor
* cache hierarchy after the copy completes. After memcpy_to_pmem()
* data may still reside in cpu or platform buffers, so this operation
* must be followed by a blkdev_issue_flush() on the pmem block device.
*/
static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
{
if (arch_has_pmem_api())
arch_memcpy_to_pmem(dst, src, n);
else
memcpy(dst, src, n);
}
/**
* copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
* See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
if (arch_has_pmem_api())
return arch_copy_from_iter_pmem(addr, bytes, i);
return copy_from_iter_nocache(addr, bytes, i);
}
/**
* clear_pmem - zero a PMEM memory range
* @addr: virtual start address
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
* See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline void clear_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_clear_pmem(addr, size);
else
memset(addr, 0, size);
}
/**
* invalidate_pmem - flush a pmem range from the cache hierarchy
* @addr: virtual start address
* @size: bytes to invalidate (internally aligned to cache line size)
*
* For platforms that support clearing poison this flushes any poisoned
* ranges out of the cache
*/
static inline void invalidate_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_invalidate_pmem(addr, size);
}
/**
* wb_cache_pmem - write back processor cache for PMEM memory range
* @addr: virtual start address
* @size: number of bytes to write back
*
* Write back the processor cache range starting at 'addr' for 'size' bytes.
* See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline void wb_cache_pmem(void *addr, size_t size)
{
if (arch_has_pmem_api())
arch_wb_cache_pmem(addr, size);
}
#endif /* __PMEM_H__ */
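With this header removed, the arch_has_pmem_api() indirection goes away and callers move to the replacements introduced in the remaining hunks: memcpy_to_pmem() becomes memcpy_flushcache(), copy_from_iter_pmem() becomes the dax ->copy_from_iter operation backed by copy_from_iter_flushcache(), and clear_pmem()/wb_cache_pmem() become memset() plus dax_flush(). A hedged sketch of the replacement pattern for a former clear_pmem() caller, mirroring the fs/dax.c hunk above (example_* names are hypothetical):

static void example_zero_range(struct dax_device *dax_dev, pgoff_t pgoff,
		void *kaddr, unsigned int offset, size_t size)
{
	memset(kaddr + offset, 0, size);			/* was clear_pmem() */
	dax_flush(dax_dev, pgoff, kaddr + offset, size);	/* was wb_cache_pmem() */
}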
...@@ -122,6 +122,12 @@ static inline __must_check int memcpy_mcsafe(void *dst, const void *src, ...@@ -122,6 +122,12 @@ static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
return 0; return 0;
} }
#endif #endif
#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
#endif
void *memchr_inv(const void *s, int c, size_t n); void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new); char *strreplace(char *s, char old, char new);
......
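The memcpy_flushcache() fallback above is a plain memcpy(), so it carries no persistence guarantee by itself; an architecture that can do better defines __HAVE_ARCH_MEMCPY_FLUSHCACHE and supplies its own implementation. An illustrative opt-in (not the x86 change from this series):

/* arch/<arch>/include/asm/string.h -- illustrative only */
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);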
...@@ -95,6 +95,21 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); ...@@ -95,6 +95,21 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
* copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes,
struct iov_iter *i)
{
return copy_from_iter_nocache(addr, bytes, i);
}
#endif
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
size_t iov_iter_zero(size_t bytes, struct iov_iter *); size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_alignment(const struct iov_iter *i);
......
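The note above matters for durability: when the architecture has not selected ARCH_HAS_UACCESS_FLUSHCACHE, copy_from_iter_flushcache() silently degrades to copy_from_iter_nocache(). A hedged sketch of a caller that still wants the destination flushed in that case (example_* names are hypothetical):

static size_t example_dax_copy_from_iter(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	size_t len = copy_from_iter_flushcache(addr, bytes, i);

	/* the fallback path was only nocache; flush explicitly for persistence */
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
		dax_flush(dax_dev, pgoff, addr, len);

	return len;
}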
...@@ -548,6 +548,9 @@ config ARCH_HAS_SG_CHAIN ...@@ -548,6 +548,9 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API config ARCH_HAS_PMEM_API
bool bool
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
config ARCH_HAS_MMIO_FLUSH config ARCH_HAS_MMIO_FLUSH
bool bool
......
...@@ -615,6 +615,28 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) ...@@ -615,6 +615,28 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
} }
EXPORT_SYMBOL(copy_from_iter_nocache); EXPORT_SYMBOL(copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, v,
__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len),
memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
v.iov_len)
)
return bytes;
}
EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
#endif
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{ {
char *to = addr; char *to = addr;
......