Commit 60622d68 authored by Dan Williams, committed by Ingo Molnar

x86/asm/memcpy_mcsafe: Return bytes remaining

Machine check safe memory copies are currently deployed in the pmem
driver whenever reading from persistent memory media, so that -EIO is
returned rather than triggering a kernel panic. While this protects most
pmem accesses, it is not complete in the filesystem-dax case. When
filesystem-dax is enabled, reads may bypass the block layer and the
driver via dax_iomap_actor() and its usage of copy_to_iter().

In preparation for creating a copy_to_iter() variant that can handle
machine checks, teach memcpy_mcsafe() to return the number of bytes
remaining rather than -EFAULT when an exception occurs.
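As an illustrative sketch only (the helper name below is hypothetical and not part of this patch), the new convention lets a caller translate a non-zero remainder into whatever error code fits its context, which is what the nvdimm changes below do with -EIO and BLK_STS_IOERR:

	static int pmem_read_sketch(void *dst, const void *src, size_t len)
	{
		unsigned long rem;

		/* returns 0 on success, number of bytes not copied on exception */
		rem = memcpy_mcsafe(dst, src, len);
		if (rem)
			return -EIO;	/* the final 'rem' bytes were not copied */
		return 0;
	}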
Co-developed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: hch@lst.de
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-nvdimm@lists.01.org
Link: http://lkml.kernel.org/r/152539238119.31796.14318473522414462886.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bd131544
@@ -116,7 +116,8 @@ int strcmp(const char *cs, const char *ct);
 #endif
 
 #define __HAVE_ARCH_MEMCPY_MCSAFE 1
-__must_check int __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
+		size_t cnt);
 DECLARE_STATIC_KEY_FALSE(mcsafe_key);
 
 /**
@@ -131,9 +132,10 @@ DECLARE_STATIC_KEY_FALSE(mcsafe_key);
  * actually do machine check recovery. Everyone else can just
  * use memcpy().
  *
- * Return 0 for success, -EFAULT for fail
+ * Return 0 for success, or number of bytes not copied if there was an
+ * exception.
  */
-static __always_inline __must_check int
+static __always_inline __must_check unsigned long
 memcpy_mcsafe(void *dst, const void *src, size_t cnt)
 {
 #ifdef CONFIG_X86_MCE
...
@@ -252,14 +252,22 @@ ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
 	.section .fixup, "ax"
-	/* Return -EFAULT for any failure */
-.L_memcpy_mcsafe_fail:
-	mov	$-EFAULT, %rax
+	/*
+	 * Return number of bytes not copied for any failure. Note that
+	 * there is no "tail" handling since the source buffer is 8-byte
+	 * aligned and poison is cacheline aligned.
+	 */
+.E_read_words:
+	shll	$3, %ecx
+.E_leading_bytes:
+	addl	%edx, %ecx
+.E_trailing_bytes:
+	mov	%ecx, %eax
 	ret
 
 	.previous
 
-	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_read_words, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
 #endif
@@ -276,7 +276,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	if (rw == READ) {
 		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
 			return -EIO;
-		return memcpy_mcsafe(buf, nsio->addr + offset, size);
+		if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
+			return -EIO;
 	}
 
 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
...
@@ -101,15 +101,15 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 		void *pmem_addr, unsigned int len)
 {
 	unsigned int chunk;
-	int rc;
+	unsigned long rem;
 	void *mem;
 
 	while (len) {
 		mem = kmap_atomic(page);
 		chunk = min_t(unsigned int, len, PAGE_SIZE);
-		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
+		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
 		kunmap_atomic(mem);
-		if (rc)
+		if (rem)
 			return BLK_STS_IOERR;
 		len -= chunk;
 		off = 0;
...
@@ -147,8 +147,8 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
 #endif
 #ifndef __HAVE_ARCH_MEMCPY_MCSAFE
-static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
-		size_t cnt)
+static inline __must_check unsigned long memcpy_mcsafe(void *dst,
+		const void *src, size_t cnt)
 {
 	memcpy(dst, src, cnt);
 	return 0;
...