Commit 3639a535 authored by Linus Torvalds

x86: move stac/clac from user copy routines into callers

This is preparatory work for inlining the 'rep movs' case, but also a
cleanup.  The __copy_user_nocache() function was mis-used by the rdma
code to do uncached kernel copies that aren't really user copies at all,
and as a result don't want the stac/clac either.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d2c95f9d
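
For illustration, a minimal C sketch of the caller-side pattern this change establishes; the example_* helpers below are hypothetical and not part of the patch. The raw copy routines no longer toggle SMAP themselves, so a caller that really copies from user space brackets the call with stac()/clac(), while a kernel-to-kernel user of __copy_user_nocache() (the rdma case mentioned above) can call it without touching SMAP at all.

/*
 * Hypothetical sketch (not from the tree): after this patch the SMAP
 * window is opened and closed by the caller that actually touches
 * user memory, not by the raw copy routine.
 */
static inline int
example_copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        long ret;

        stac();                 /* allow access to user pages (SMAP) */
        ret = __copy_user_nocache(dst, src, size, 0);
        clac();                 /* close the user-access window again */
        return ret;
}

/* Kernel-to-kernel use: no user access involved, so no stac()/clac(). */
static inline long
example_kernel_nocache_copy(void *dst, const void *src, unsigned size)
{
        return __copy_user_nocache(dst, (__force const void __user *)src, size, 0);
}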
arch/x86/include/asm/uaccess_64.h

@@ -27,6 +27,7 @@ copy_user_generic(void *to, const void *from, unsigned len)
 {
         unsigned ret;
 
+        stac();
         /*
          * If CPU has FSRM feature, use 'rep movs'.
          * Otherwise, use copy_user_generic_unrolled.
@@ -38,6 +39,7 @@ copy_user_generic(void *to, const void *from, unsigned len)
                                      "=d" (len)),
                          "1" (to), "2" (from), "3" (len)
                          : "memory", "rcx", "r8", "r9", "r10", "r11");
+        clac();
         return ret;
 }
 
@@ -64,8 +66,12 @@ static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                   unsigned size)
 {
+        long ret;
+
         kasan_check_write(dst, size);
-        return __copy_user_nocache(dst, src, size, 0);
+        stac();
+        ret = __copy_user_nocache(dst, src, size, 0);
+        clac();
+        return ret;
 }
 
 static inline int
arch/x86/lib/copy_user_64.S

@@ -51,7 +51,6 @@
  * eax uncopied bytes or 0 if successful.
  */
 SYM_FUNC_START(copy_user_generic_unrolled)
-        ASM_STAC
         cmpl $8,%edx
         jb .Lcopy_user_short_string_bytes
         ALIGN_DESTINATION
@@ -123,15 +122,12 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
  * eax uncopied bytes or 0 if successful.
  */
 SYM_FUNC_START(copy_user_fast_string)
-        ASM_STAC
         movl %edx,%ecx
 1:      rep movsb
         xorl %eax,%eax
-        ASM_CLAC
         RET
 
 12:     movl %ecx,%eax          /* ecx is zerorest also */
-        ASM_CLAC
         RET
 
         _ASM_EXTABLE_CPY(1b, 12b)
@@ -160,12 +156,10 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
         movl %edx,%ecx
 1:      rep movsb
 2:      mov %ecx,%eax
-        ASM_CLAC
         RET
 
 3:
         movl %edx,%eax
-        ASM_CLAC
         RET
 
         _ASM_EXTABLE_CPY(1b, 2b)
@@ -209,7 +203,6 @@ SYM_CODE_START_LOCAL(copy_user_short_string)
         decl %ecx
         jnz 21b
 23:     xor %eax,%eax
-        ASM_CLAC
         RET
 
 40:     leal (%rdx,%rcx,8),%edx
@@ -233,8 +226,6 @@ SYM_CODE_END(copy_user_short_string)
  *  - Require 4-byte alignment when size is 4 bytes.
  */
 SYM_FUNC_START(__copy_user_nocache)
-        ASM_STAC
-
         /* If size is less than 8 bytes, go to 4-byte copy */
         cmpl $8,%edx
         jb .L_4b_nocache_copy_entry
@@ -327,7 +318,6 @@ SYM_FUNC_START(__copy_user_nocache)
 /* Finished copying; fence the prior stores */
 .L_finish_copy:
         xorl %eax,%eax
-        ASM_CLAC
         sfence
         RET
arch/x86/lib/usercopy_64.c

@@ -45,7 +45,11 @@ EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
         unsigned long flushed, dest = (unsigned long) dst;
-        long rc = __copy_user_nocache(dst, src, size, 0);
+        long rc;
+
+        stac();
+        rc = __copy_user_nocache(dst, src, size, 0);
+        clac();
 
         /*
          * __copy_user_nocache() uses non-temporal stores for the bulk
tools/objtool/check.c

@@ -1285,6 +1285,9 @@ static const char *uaccess_safe_builtin[] = {
         "copy_mc_enhanced_fast_string",
         "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
         "clear_user_original",
+        "copy_user_generic_unrolled",
+        "copy_user_fast_string",
+        "__copy_user_nocache",
         NULL
 };