Commit 5de490da authored by Ross Zwisler, committed by Dan Williams

pmem: add copy_from_iter_pmem() and clear_pmem()

Add support for two new PMEM APIs, copy_from_iter_pmem() and
clear_pmem().  copy_from_iter_pmem() is used to copy data from an
iterator into a PMEM buffer.  clear_pmem() zeros a PMEM memory range.

Both of these new APIs must be explicitly ordered with a wmb_pmem()
call, and they are implemented in such a way that wmb_pmem() will make
their stores to PMEM durable.  Because both APIs are themselves
unordered, they can be called as needed without introducing unwanted
memory barriers.
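
As an illustration (not part of the patch), a minimal caller might look
like the sketch below; pmem_update_example() is hypothetical, while
copy_from_iter_pmem(), clear_pmem() and wmb_pmem() are the APIs
described above:

/* Sketch: two unordered PMEM updates made durable by a single wmb_pmem() */
static void pmem_update_example(void __pmem *dst, size_t len,
		struct iov_iter *i)
{
	copy_from_iter_pmem(dst, len, i);	/* unordered copy into PMEM */
	clear_pmem(dst + len, 64);		/* unordered zeroing */
	wmb_pmem();	/* one barrier makes both updates durable */
}
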
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 4a370df5
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -66,6 +66,81 @@ static inline void arch_wmb_pmem(void)
	pcommit_sfence();
}

/**
 * __arch_wb_cache_pmem - write back a cache range with CLWB
 * @vaddr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. This function requires explicit ordering with an
 * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
 */
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = vaddr + size;
	void *p;

	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
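
As a worked example (illustrative, assuming a 64-byte cache line): a call
with vaddr = 0x1028 and size = 100 aligns the start down to 0x1000,
computes vend = 0x108c, and therefore issues CLWB for the three cache
lines at 0x1000, 0x1040 and 0x1080.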

/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
	return iter_is_iovec(i) == false;
}
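
For example (a sketch, not part of the patch; it assumes the v4.x-era
iov_iter_kvec() signature), a kernel kvec iterator fails iter_is_iovec()
and therefore takes the explicit write-back path:

/* Sketch: a kvec-backed iterator is not an iovec, so CLWB is required */
static void kvec_copy_example(void __pmem *dst, void *src, size_t len)
{
	struct kvec kv = { .iov_base = src, .iov_len = len };
	struct iov_iter i;

	iov_iter_kvec(&i, ITER_KVEC | WRITE, &kv, 1, len);
	/* __iter_needs_pmem_wb(&i) is true: copy, then cache write-back */
	arch_copy_from_iter_pmem(dst, len, &i);
}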

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr:	PMEM destination address
 * @bytes:	number of bytes to copy
 * @i:		iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	void *vaddr = (void __force *)addr;
	size_t len;

	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(vaddr, bytes, i);

	if (__iter_needs_pmem_wb(i))
		__arch_wb_cache_pmem(vaddr, bytes);

	return len;
}

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr:	virtual start address
 * @size:	number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	void *vaddr = (void __force *)addr;

	/* TODO: implement the zeroing via non-temporal writes */
	if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
		clear_page(vaddr);
	else
		memset(vaddr, 0, size);

	__arch_wb_cache_pmem(vaddr, size);
}
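
To make the fast path concrete (an illustrative sketch, not part of the
patch; 'pmem' is assumed to be a page-aligned PMEM mapping):

/* Sketch: only page-aligned, PAGE_SIZE clears take the clear_page() path */
static void clear_example(void __pmem *pmem)
{
	arch_clear_pmem(pmem, PAGE_SIZE);	/* clear_page() fast path */
	arch_clear_pmem(pmem + 64, 512);	/* memset() fallback */
	arch_wmb_pmem();			/* make both ranges durable */
}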

static inline bool arch_has_wmb_pmem(void)
{
#ifdef CONFIG_X86_64
...

--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,6 +14,7 @@
#define __PMEM_H__

#include <linux/io.h>
#include <linux/uio.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
#include <asm/pmem.h>

@@ -33,12 +34,24 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
{
	BUG();
}

static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	BUG();
	return 0;
}

static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	BUG();
}
#endif
/*
* Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), and
- * arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
*/
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)

@@ -78,6 +91,20 @@ static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
	memcpy((void __force *) dst, src, size);
}

static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
		size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_nocache((void __force *)addr, bytes, i);
}

static inline void default_clear_pmem(void __pmem *addr, size_t size)
{
	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
		clear_page((void __force *)addr);
	else
		memset((void __force *)addr, 0, size);
}

/**
 * memremap_pmem - map physical persistent memory for pmem api
 * @offset:	physical address of persistent memory
...

@@ -134,4 +161,37 @@ static inline void wmb_pmem(void)
	if (arch_has_pmem_api())
		arch_wmb_pmem();
}

/**
 * copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr:	PMEM destination address
 * @bytes:	number of bytes to copy
 * @i:		iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with a wmb_pmem() call.
 */
static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	if (arch_has_pmem_api())
		return arch_copy_from_iter_pmem(addr, bytes, i);
	return default_copy_from_iter_pmem(addr, bytes, i);
}
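
For example, a write(2)-style caller could route a user iovec through
this wrapper (a sketch, not part of the patch; pmem_write_example() is
hypothetical):

/* Sketch: copy a user iovec into PMEM and make the data durable */
static ssize_t pmem_write_example(void __pmem *dst, const struct iovec *iov,
		unsigned long nr_segs, size_t len)
{
	struct iov_iter i;
	size_t copied;

	iov_iter_init(&i, WRITE, iov, nr_segs, len);
	copied = copy_from_iter_pmem(dst, len, &i);
	wmb_pmem();		/* fence/flush the copied bytes */
	return copied;		/* may be short if a user copy faults */
}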

/**
 * clear_pmem - zero a PMEM memory range
 * @addr:	virtual start address
 * @size:	number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with a wmb_pmem() call.
 */
static inline void clear_pmem(void __pmem *addr, size_t size)
{
	if (arch_has_pmem_api())
		arch_clear_pmem(addr, size);
	else
		default_clear_pmem(addr, size);
}

#endif /* __PMEM_H__ */