Commit 1654c37d authored by Linus Torvalds

Merge branch 'arm64-uaccess' (early part)

Merge arm64 support for proper 'unsafe' user accessor functionality,
with 'asm goto' for handling exceptions.

The arm64 user access code used the slow fallback paths for the user
accessors, which generate horrendous code for things like
strncpy_from_user(), because they cause us to generate code for SW PAN
and for range checking for every individual word.

Teach arm64 about 'user_access_begin()' and the so-called 'unsafe' user
access functions that take an error label and use 'asm goto' to make all
the exception handling be entirely out of line.

[ These user access functions are called 'unsafe' not because the
  concept is unsafe, but because the low-level accessor functions
  absolutely have to be protected by the 'user_access_begin()' code,
  because that's what does the range checking.

  So the accessor functions have that scary name to make sure people
  don't think they are usable on their own, and so that they cannot be
  mis-used the way our old "double underscore" versions of __get_user()
  and friends were ]

The "(early part)" of the branch is because the full branch also
improved on the "access_ok()" function, but the exact semantics of TBI
(top byte ignore) have to be discussed before doing that part.  So this
just does the low-level accessor update to use "asm goto".

* 'arm64-uaccess' (early part):
  arm64: start using 'asm goto' for put_user()
  arm64: start using 'asm goto' for get_user() when available
parents 6a31ffdf 7fd298d4
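
To make the new API concrete, here is a minimal usage sketch. Only
user_access_begin(), unsafe_get_user() and user_access_end() come from
this merge; the surrounding function and its error handling are a
hypothetical example, not kernel code:

/*
 * Usage sketch (assumes <linux/uaccess.h>): the range check happens
 * once in user_access_begin(), the fast path contains no inline error
 * checks, and a faulting access jumps straight to the 'efault' label
 * via the exception table.
 */
static int read_u64_from_user(const u64 __user *uptr, u64 *out)
{
	u64 val;

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;			/* range check failed */
	unsafe_get_user(val, uptr, efault);	/* fault => goto efault */
	user_access_end();

	*out = val;
	return 0;

efault:
	user_access_end();	/* PAN must be re-enabled on the fault path too */
	return -EFAULT;
}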
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1649,6 +1649,7 @@ config RODATA_FULL_DEFAULT_ENABLED
 
 config ARM64_SW_TTBR0_PAN
 	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+	depends on !KCSAN
 	help
 	  Enabling this option prevents the kernel from accessing
 	  user-space memory directly by pointing TTBR0_EL1 to a reserved
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -112,6 +112,9 @@
 #define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err)			\
 	_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
 
+#define _ASM_EXTABLE_KACCESS(insn, fixup)				\
+	_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
 #define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr)	\
 	__DEFINE_ASM_GPR_NUMS						\
 	__ASM_EXTABLE_RAW(#insn, #fixup,				\
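For readers who haven't met the extable machinery: each _ASM_EXTABLE_*
invocation emits an entry associating a potentially-faulting instruction
with a fixup target, which the fault handler consults. A deliberately
simplified model of the idea (an illustration only — the real arm64
entries store PC-relative offsets plus type and per-type data, not
absolute pointers):

/*
 * Simplified model of an exception table: a list of
 * (faulting instruction, fixup) pairs.
 */
struct extable_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* where execution resumes if it does */
};

/* Called from a (hypothetical) fault handler with the faulting PC. */
static unsigned long extable_fixup(const struct extable_entry *tbl,
				   unsigned int n, unsigned long fault_pc)
{
	for (unsigned int i = 0; i < n; i++)
		if (tbl[i].insn == fault_pc)
			return tbl[i].fixup;
	return 0;	/* no entry: the fault is genuinely fatal */
}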
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -184,29 +184,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_mem_asm(load, reg, x, addr, err, type)			\
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_mem_asm(load, reg, x, addr, label, type)			\
+	asm_goto_output(						\
+	"1:	" load "	" reg "0, [%1]\n"			\
+	_ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0)			\
+	: "=r" (x)							\
+	: "r" (addr) : : label)
+#else
+#define __get_mem_asm(load, reg, x, addr, label, type) do {		\
+	int __gma_err = 0;						\
 	asm volatile(							\
 	"1:	" load "	" reg "1, [%2]\n"			\
 	"2:\n"								\
 	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
-	: "+r" (err), "=r" (x)						\
-	: "r" (addr))
+	: "+r" (__gma_err), "=r" (x)					\
+	: "r" (addr));							\
+	if (__gma_err) goto label; } while (0)
+#endif
 
-#define __raw_get_mem(ldr, x, ptr, err, type)				\
+#define __raw_get_mem(ldr, x, ptr, label, type)			\
 do {									\
 	unsigned long __gu_val;						\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);	\
 		break;							\
 	case 2:								\
-		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);	\
 		break;							\
 	case 4:								\
-		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);	\
 		break;							\
 	case 8:								\
-		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type);	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
@@ -219,27 +230,34 @@ do {									\
  * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
  * we must evaluate these outside of the critical section.
  */
-#define __raw_get_user(x, ptr, err)					\
+#define __raw_get_user(x, ptr, label)					\
 do {									\
 	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
 	__typeof__(x) __rgu_val;					\
 	__chk_user_ptr(ptr);						\
-									\
-	uaccess_ttbr0_enable();						\
-	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U);		\
-	uaccess_ttbr0_disable();					\
-									\
-	(x) = __rgu_val;						\
+	do {								\
+		__label__ __rgu_failed;					\
+		uaccess_ttbr0_enable();					\
+		__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U);	\
+		uaccess_ttbr0_disable();				\
+		(x) = __rgu_val;					\
+		break;							\
+	__rgu_failed:							\
+		uaccess_ttbr0_disable();				\
+		goto label;						\
+	} while (0);							\
 } while (0)
 
 #define __get_user_error(x, ptr, err)					\
 do {									\
+	__label__ __gu_failed;						\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
-		__raw_get_user((x), __p, (err));			\
+		__raw_get_user((x), __p, __gu_failed);			\
 	} else {							\
+	__gu_failed:							\
 		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
 	}								\
 } while (0)
@@ -262,40 +280,42 @@ do {									\
 do {									\
 	__typeof__(dst) __gkn_dst = (dst);				\
 	__typeof__(src) __gkn_src = (src);				\
-	int __gkn_err = 0;						\
-									\
-	__mte_enable_tco_async();					\
-	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
-		      (__force type *)(__gkn_src), __gkn_err, K);	\
-	__mte_disable_tco_async();					\
-									\
-	if (unlikely(__gkn_err))					\
+	do {								\
+		__label__ __gkn_label;					\
+									\
+		__mte_enable_tco_async();				\
+		__raw_get_mem("ldr", *((type *)(__gkn_dst)),		\
+			      (__force type *)(__gkn_src), __gkn_label, K);	\
+		__mte_disable_tco_async();				\
+		break;							\
+	__gkn_label:							\
+		__mte_disable_tco_async();				\
 		goto err_label;						\
+	} while (0);							\
 } while (0)
 
-#define __put_mem_asm(store, reg, x, addr, err, type)			\
-	asm volatile(							\
-	"1:	" store "	" reg "1, [%2]\n"			\
+#define __put_mem_asm(store, reg, x, addr, label, type)		\
+	asm goto(							\
+	"1:	" store "	" reg "0, [%1]\n"			\
 	"2:\n"								\
-	_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0)			\
-	: "+r" (err)							\
-	: "rZ" (x), "r" (addr))
+	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
+	: : "rZ" (x), "r" (addr) : : label)
 
-#define __raw_put_mem(str, x, ptr, err, type)				\
+#define __raw_put_mem(str, x, ptr, label, type)			\
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type);	\
 		break;							\
 	case 2:								\
-		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type);	\
 		break;							\
 	case 4:								\
-		__put_mem_asm(str, "%w", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str, "%w", __pu_val, (ptr), label, type);	\
 		break;							\
 	case 8:								\
-		__put_mem_asm(str, "%x", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str, "%x", __pu_val, (ptr), label, type);	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
@@ -307,25 +327,34 @@ do {									\
  * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
  * we must evaluate these outside of the critical section.
  */
-#define __raw_put_user(x, ptr, err)					\
+#define __raw_put_user(x, ptr, label)					\
 do {									\
+	__label__ __rpu_failed;						\
 	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
 	__typeof__(*(ptr)) __rpu_val = (x);				\
 	__chk_user_ptr(__rpu_ptr);					\
 									\
-	uaccess_ttbr0_enable();						\
-	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U);		\
-	uaccess_ttbr0_disable();					\
+	do {								\
+		uaccess_ttbr0_enable();					\
+		__raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U);	\
+		uaccess_ttbr0_disable();				\
+		break;							\
+	__rpu_failed:							\
+		uaccess_ttbr0_disable();				\
+		goto label;						\
+	} while (0);							\
 } while (0)
 
 #define __put_user_error(x, ptr, err)					\
 do {									\
+	__label__ __pu_failed;						\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
-		__raw_put_user((x), __p, (err));			\
+		__raw_put_user((x), __p, __pu_failed);			\
 	} else	{							\
+	__pu_failed:							\
 		(err) = -EFAULT;					\
 	}								\
 } while (0)
@@ -348,15 +377,18 @@ do {									\
 do {									\
 	__typeof__(dst) __pkn_dst = (dst);				\
 	__typeof__(src) __pkn_src = (src);				\
-	int __pkn_err = 0;						\
 									\
-	__mte_enable_tco_async();					\
-	__raw_put_mem("str", *((type *)(__pkn_src)),			\
-		      (__force type *)(__pkn_dst), __pkn_err, K);	\
-	__mte_disable_tco_async();					\
-									\
-	if (unlikely(__pkn_err))					\
+	do {								\
+		__label__ __pkn_err;					\
+		__mte_enable_tco_async();				\
+		__raw_put_mem("str", *((type *)(__pkn_src)),		\
+			      (__force type *)(__pkn_dst), __pkn_err, K);	\
+		__mte_disable_tco_async();				\
+		break;							\
+	__pkn_err:							\
+		__mte_disable_tco_async();				\
 		goto err_label;						\
+	} while (0);							\
 } while(0)
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
@@ -381,6 +413,51 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 	__actu_ret;							\
 })
 
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr,len)))
+		return 0;
+	uaccess_ttbr0_enable();
+	return 1;
+}
+#define user_access_begin(a,b)	user_access_begin(a,b)
+#define user_access_end()	uaccess_ttbr0_disable()
+
+#define unsafe_put_user(x, ptr, label)	\
+	__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
+#define unsafe_get_user(x, ptr, label)	\
+	__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
+
+/*
+ * KCSAN uses these to save and restore ttbr state.
+ * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
+ * they are no-ops.
+ */
+static inline unsigned long user_access_save(void) { return 0; }
+static inline void user_access_restore(unsigned long enabled) { }
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)			\
+	while (len >= sizeof(type)) {					\
+		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
+		dst += sizeof(type);					\
+		src += sizeof(type);					\
+		len -= sizeof(type);					\
+	}
+
+#define unsafe_copy_to_user(_dst,_src,_len,label)			\
+do {									\
+	char __user *__ucu_dst = (_dst);				\
+	const char *__ucu_src = (_src);					\
+	size_t __ucu_len = (_len);					\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
+} while (0)
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
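The compiler feature doing the heavy lifting in the new __get_mem_asm is
'asm goto' with output operands, gated by CONFIG_CC_HAS_ASM_GOTO_OUTPUT.
A stripped-down, arm64-only sketch of just the operand syntax; note that
without an exception-table entry attached to the load, the fail label
below is never actually reachable:

#include <errno.h>

/*
 * 'asm goto' with an output operand (GCC 11+ / recent Clang).  In the
 * kernel, an _ASM_EXTABLE_* entry attached to the load lets the fault
 * handler redirect execution to the label; here only the shape of the
 * construct is being shown.
 */
static inline int load_word(unsigned long *dst, const unsigned long *src)
{
	unsigned long val;

	asm goto("ldr %x0, [%1]"
		 : "=r" (val)	/* outputs: only allowed in asm goto recently */
		 : "r" (src)	/* inputs */
		 :		/* no clobbers */
		 : fail);	/* possible branch targets come last */

	*dst = val;
	return 0;

fail:
	return -EFAULT;
}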
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -582,12 +582,9 @@ subsys_initcall(register_mte_tcf_preferred_sysctl);
 size_t mte_probe_user_range(const char __user *uaddr, size_t size)
 {
 	const char __user *end = uaddr + size;
-	int err = 0;
 	char val;
 
-	__raw_get_user(val, uaddr, err);
-	if (err)
-		return size;
+	__raw_get_user(val, uaddr, efault);
 
 	uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
 	while (uaddr < end) {
@@ -595,12 +592,13 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size)
 		 * A read is sufficient for mte, the caller should have probed
 		 * for the pte write permission if required.
 		 */
-		__raw_get_user(val, uaddr, err);
-		if (err)
-			return end - uaddr;
+		__raw_get_user(val, uaddr, efault);
 		uaddr += MTE_GRANULE_SIZE;
 	}
 	(void)val;
 
 	return 0;
+
+efault:
+	return end - uaddr;
 }
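
One construct recurs throughout the uaccess.h changes above: a do/while(0)
block declaring a GNU C __label__, where the success path break's out and
the failure path repeats the cleanup before jumping to the caller-supplied
label. A self-contained sketch of just that pattern — lock(), unlock(),
slot_valid() and read_slot_or_goto() are made-up stand-ins for
uaccess_ttbr0_enable()/uaccess_ttbr0_disable() and the accessor macros:

#include <errno.h>

/* Made-up stand-ins for uaccess_ttbr0_enable()/uaccess_ttbr0_disable(). */
static int lock_depth;
static void lock(void)   { lock_depth++; }
static void unlock(void) { lock_depth--; }

static int slots[16];
static int slot_valid(int idx) { return idx >= 0 && idx < 16; }

/*
 * The local-label pattern from __raw_get_user()/__raw_put_user():
 * '__label__' scopes __rs_failed to this one block, so the macro can
 * be used several times in a function without label collisions, and
 * the cleanup (unlock) runs exactly once on either path.
 */
#define read_slot_or_goto(val, idx, err_label)		\
do {							\
	__label__ __rs_failed;				\
	lock();						\
	if (!slot_valid(idx))				\
		goto __rs_failed;			\
	(val) = slots[idx];				\
	unlock();					\
	break;		/* success: leave the do/while */	\
__rs_failed:						\
	unlock();	/* cleanup duplicated on the error path */	\
	goto err_label;					\
} while (0)

static int read_two(int *a, int *b)
{
	read_slot_or_goto(*a, 3, fail);
	read_slot_or_goto(*b, 7, fail);
	return 0;
fail:
	return -EFAULT;
}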