Commit 20802d8d authored by Jisheng Zhang, committed by Palmer Dabbelt

riscv: extable: add a dedicated uaccess handler

Inspired by commit 2e77a62c ("arm64: extable: add a dedicated
uaccess handler"), do the same for riscv: add a dedicated uaccess
exception handler that updates registers in exception context and
then returns into the function which faulted, removing the need
for fixups specialized to each faulting instruction.
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent ff4b8cad
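
For context, the new handler relies on the extended extable entry layout
introduced earlier in this series, where each entry carries a handler type
and a handler-private data word. A minimal sketch of that layout (riscv's
matches arm64's; the field comments are mine, not from the header):

/* 'insn' and 'fixup' are offsets relative to the entry itself;
 * 'type' selects the handler in fixup_exception(); 'data' is
 * handler-private payload -- here, the ERR/ZERO register numbers. */
struct exception_table_entry {
	int insn, fixup;
	short type, data;
};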
arch/riscv/include/asm/asm-extable.h
@@ -5,6 +5,7 @@
 #define EX_TYPE_NONE			0
 #define EX_TYPE_FIXUP			1
 #define EX_TYPE_BPF			2
+#define EX_TYPE_UACCESS_ERR_ZERO	3
 
 #ifdef __ASSEMBLY__
@@ -23,7 +24,9 @@
 #else /* __ASSEMBLY__ */
 
+#include <linux/bits.h>
 #include <linux/stringify.h>
+#include <asm/gpr-num.h>
 
 #define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
 	".pushsection	__ex_table, \"a\"\n"		\
@@ -37,6 +40,26 @@
 #define _ASM_EXTABLE(insn, fixup)	\
 	__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
 
+#define EX_DATA_REG_ERR_SHIFT	0
+#define EX_DATA_REG_ERR		GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT	5
+#define EX_DATA_REG_ZERO	GENMASK(9, 5)
+
+#define EX_DATA_REG(reg, gpr)						\
+	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
+	__DEFINE_ASM_GPR_NUMS						\
+	__ASM_EXTABLE_RAW(#insn, #fixup,				\
+			  __stringify(EX_TYPE_UACCESS_ERR_ZERO),	\
+			  "("						\
+			    EX_DATA_REG(ERR, err) " | "			\
+			    EX_DATA_REG(ZERO, zero)			\
+			  ")")
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_ASM_EXTABLE_H */
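
The new macros encode two 5-bit GPR numbers into the entry's data word:
bits 4..0 name the register that receives -EFAULT, bits 9..5 the register
to be zeroed. A standalone user-space sketch of that packing and unpacking
(the GENMASK/FIELD_GET re-creations below are illustrative stand-ins for
the kernel helpers, and the register numbers are arbitrary):

#include <stdio.h>

/* Illustrative re-creations of the kernel's GENMASK()/FIELD_GET() */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

#define EX_DATA_REG_ERR		GENMASK(4, 0)
#define EX_DATA_REG_ZERO	GENMASK(9, 5)

int main(void)
{
	/* as if the asm had encoded err = a0 (x10), zero = a1 (x11) */
	unsigned int data = (10 << 0) | (11 << 5);

	printf("err reg  = x%u\n", FIELD_GET(EX_DATA_REG_ERR, data));
	printf("zero reg = x%u\n", FIELD_GET(EX_DATA_REG_ZERO, data));
	return 0;
}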
arch/riscv/include/asm/futex.h
@@ -21,20 +21,14 @@
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 {								\
-	uintptr_t tmp;						\
 	__enable_user_access();					\
 	__asm__ __volatile__ (					\
 	"1:	" insn "				\n"	\
 	"2:						\n"	\
-	"	.section .fixup,\"ax\"			\n"	\
-	"	.balign 4				\n"	\
-	"3:	li %[r],%[e]				\n"	\
-	"	jump 2b,%[t]				\n"	\
-	"	.previous				\n"	\
-	_ASM_EXTABLE(1b, 3b)					\
+	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r])			\
 	: [r] "+r" (ret), [ov] "=&r" (oldval),			\
-	  [u] "+m" (*uaddr), [t] "=&r" (tmp)			\
-	: [op] "Jr" (oparg), [e] "i" (-EFAULT)			\
+	  [u] "+m" (*uaddr)					\
+	: [op] "Jr" (oparg)					\
 	: "memory");						\
 	__disable_user_access();				\
 }
@@ -96,15 +90,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	"2:	sc.w.aqrl %[t],%z[nv],%[u]		\n"
 	"	bnez %[t],1b				\n"
 	"3:						\n"
-	"	.section .fixup,\"ax\"			\n"
-	"	.balign 4				\n"
-	"4:	li %[r],%[e]				\n"
-	"	jump 3b,%[t]				\n"
-	"	.previous				\n"
-	_ASM_EXTABLE(1b, 4b)				\
-	_ASM_EXTABLE(2b, 4b)				\
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
 	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
-	: [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
+	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
 	: "memory");
 	__disable_user_access();
arch/riscv/include/asm/uaccess.h
@@ -81,22 +81,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 #define __get_user_asm(insn, x, ptr, err)			\
 do {								\
-	uintptr_t __tmp;					\
 	__typeof__(x) __x;					\
 	__asm__ __volatile__ (					\
 		"1:\n"						\
-		"	" insn " %1, %3\n"			\
+		"	" insn " %1, %2\n"			\
 		"2:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"3:\n"						\
-		"	li %0, %4\n"				\
-		"	li %1, 0\n"				\
-		"	jump 2b, %2\n"				\
-		"	.previous\n"				\
-		_ASM_EXTABLE(1b, 3b)				\
-		: "+r" (err), "=&r" (__x), "=r" (__tmp)		\
-		: "m" (*(ptr)), "i" (-EFAULT));			\
+		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
+		: "+r" (err), "=&r" (__x)			\
+		: "m" (*(ptr)));				\
 	(x) = __x;						\
 } while (0)
@@ -108,27 +100,18 @@
 do {								\
 	u32 __user *__ptr = (u32 __user *)(ptr);		\
 	u32 __lo, __hi;						\
-	uintptr_t __tmp;					\
 	__asm__ __volatile__ (					\
 		"1:\n"						\
-		"	lw %1, %4\n"				\
+		"	lw %1, %3\n"				\
 		"2:\n"						\
-		"	lw %2, %5\n"				\
+		"	lw %2, %4\n"				\
 		"3:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"4:\n"						\
-		"	li %0, %6\n"				\
-		"	li %1, 0\n"				\
-		"	li %2, 0\n"				\
-		"	jump 3b, %3\n"				\
-		"	.previous\n"				\
-		_ASM_EXTABLE(1b, 4b)				\
-		_ASM_EXTABLE(2b, 4b)				\
-		: "+r" (err), "=&r" (__lo), "=r" (__hi),	\
-		  "=r" (__tmp)					\
-		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]),	\
-		  "i" (-EFAULT));				\
+		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1)	\
+		_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1)	\
+		: "+r" (err), "=&r" (__lo), "=r" (__hi)		\
+		: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]));	\
+	if (err)						\
+		__hi = 0;					\
 	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
 		(((u64)__hi << 32) | __lo)));			\
 } while (0)
@@ -216,21 +199,14 @@
 #define __put_user_asm(insn, x, ptr, err)			\
 do {								\
-	uintptr_t __tmp;					\
 	__typeof__(*(ptr)) __x = x;				\
 	__asm__ __volatile__ (					\
 		"1:\n"						\
-		"	" insn " %z3, %2\n"			\
+		"	" insn " %z2, %1\n"			\
 		"2:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"3:\n"						\
-		"	li %0, %4\n"				\
-		"	jump 2b, %1\n"				\
-		"	.previous\n"				\
-		_ASM_EXTABLE(1b, 3b)				\
-		: "+r" (err), "=r" (__tmp), "=m" (*(ptr))	\
-		: "rJ" (__x), "i" (-EFAULT));			\
+		_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0)		\
+		: "+r" (err), "=m" (*(ptr))			\
+		: "rJ" (__x));					\
 } while (0)
 
 #ifdef CONFIG_64BIT
@@ -241,25 +217,18 @@
 do {								\
 	u32 __user *__ptr = (u32 __user *)(ptr);		\
 	u64 __x = (__typeof__((x)-(x)))(x);			\
-	uintptr_t __tmp;					\
 	__asm__ __volatile__ (					\
 		"1:\n"						\
-		"	sw %z4, %2\n"				\
+		"	sw %z3, %1\n"				\
 		"2:\n"						\
-		"	sw %z5, %3\n"				\
+		"	sw %z4, %2\n"				\
 		"3:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
-		"	.balign 4\n"				\
-		"4:\n"						\
-		"	li %0, %6\n"				\
-		"	jump 3b, %1\n"				\
-		"	.previous\n"				\
-		_ASM_EXTABLE(1b, 4b)				\
-		_ASM_EXTABLE(2b, 4b)				\
-		: "+r" (err), "=r" (__tmp),			\
+		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)		\
+		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)		\
+		: "+r" (err),					\
 		  "=m" (__ptr[__LSW]),				\
 		  "=m" (__ptr[__MSW])				\
-		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
+		: "rJ" (__x), "rJ" (__x >> 32));		\
 } while (0)
 
 #endif /* CONFIG_64BIT */
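
One subtlety in the 32-bit __get_user_8 hunk above: an extable entry can
zero only one register, so the asm zeroes __lo (operand %1) and the high
word is cleared afterwards in C, hence the new "if (err) __hi = 0;". A
user-space sketch of that recombination (the values are made up for
illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int err = -14;            /* -EFAULT, as the handler would leave it */
	uint32_t lo = 0;          /* zeroed by the extable handler */
	uint32_t hi = 0xdeadbeef; /* stale junk still in the register */

	if (err)
		hi = 0;           /* mirrors the C-side fixup in the hunk */

	uint64_t x = ((uint64_t)hi << 32) | lo;
	printf("err=%d x=%llu\n", err, (unsigned long long)x);
	return 0;
}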
arch/riscv/mm/extable.c
@@ -7,10 +7,12 @@
  */
 
+#include <linux/bitfield.h>
 #include <linux/extable.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 
 #include <asm/asm-extable.h>
+#include <asm/ptrace.h>
 
 static inline unsigned long
 get_ex_fixup(const struct exception_table_entry *ex)
@@ -25,6 +27,29 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex,
 	return true;
 }
 
+static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
+				unsigned long val)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return;
+
+	if (offset)
+		*(unsigned long *)((unsigned long)regs + offset) = val;
+}
+
+static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
+					struct pt_regs *regs)
+{
+	int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+	int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
+
+	regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+	regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
+
+	regs->epc = get_ex_fixup(ex);
+	return true;
+}
+
 bool fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *ex;
@@ -38,6 +63,8 @@ bool fixup_exception(struct pt_regs *regs)
 		return ex_handler_fixup(ex, regs);
 	case EX_TYPE_BPF:
 		return ex_handler_bpf(ex, regs);
+	case EX_TYPE_UACCESS_ERR_ZERO:
+		return ex_handler_uaccess_err_zero(ex, regs);
 	}
 
 	BUG();
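
The handler addresses GPRs by byte offset into struct pt_regs, where xN
lives at N * sizeof(unsigned long); x0 maps to offset 0, which
regs_set_gpr deliberately refuses to write since the zero register must
stay zero. A quick user-space check of that arithmetic (the struct is a
mock of the leading fields of riscv's pt_regs, not the kernel definition):

#include <stddef.h>
#include <stdio.h>

/* Mock of the start of riscv's struct pt_regs: epc, then x1.. */
struct mock_pt_regs {
	unsigned long epc;                        /* offset 0 */
	unsigned long ra, sp, gp, tp, t0, t1, t2; /* x1..x7 */
	unsigned long s0, s1, a0, a1;             /* x8..x11 */
};

int main(void)
{
	unsigned int reg_err = 10;	/* a0, i.e. x10 */
	size_t off = reg_err * sizeof(unsigned long);

	printf("computed %zu == offsetof(a0) %zu\n",
	       off, offsetof(struct mock_pt_regs, a0));
	return 0;
}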