Commit 99fe09c8 authored by Will Deacon

Merge branch 'for-next/extable' into for-next/core

* for-next/extable:
  arm64: vmlinux.lds.S: remove `.fixup` section
  arm64: extable: add load_unaligned_zeropad() handler
  arm64: extable: add a dedicated uaccess handler
  arm64: extable: add `type` and `data` fields
  arm64: extable: use `ex` for `exception_table_entry`
  arm64: extable: make fixup_exception() return bool
  arm64: extable: consolidate definitions
  arm64: gpr-num: support W registers
  arm64: factor out GPR numbering helpers
  arm64: kvm: use kvm_exception_table_entry
  arm64: lib: __arch_copy_to_user(): fold fixups into body
  arm64: lib: __arch_copy_from_user(): fold fixups into body
  arm64: lib: __arch_clear_user(): fold fixups into body
parents a69483ee bf6e667f
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
#define EX_TYPE_UACCESS_ERR_ZERO 3
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
.align 2; \
.long ((insn) - .); \
.long ((fixup) - .); \
.short (type); \
.short (data); \
.popsection;
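/*
 * Layout sketch: each entry emitted above is 12 bytes, mirroring the
 * C-side struct exception_table_entry later in this series:
 *
 *	struct exception_table_entry {
 *		int insn, fixup;	// offsets from the entry itself
 *		short type, data;	// EX_TYPE_* and per-type payload
 *	};
 */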
/*
* Create an exception table entry for `insn`, which will branch to `fixup`
* when an unhandled fault is taken.
*/
.macro _asm_extable, insn, fixup
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
/*
* Create an exception table entry for `insn` if `fixup` is provided. Otherwise
* do nothing.
*/
.macro _cond_extable, insn, fixup
.ifnc \fixup,
_asm_extable \insn, \fixup
.endif
.endm
#else /* __ASSEMBLY__ */
#include <linux/bits.h>
#include <linux/stringify.h>
#include <asm/gpr-num.h>
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
".pushsection __ex_table, \"a\"\n" \
".align 2\n" \
".long ((" insn ") - .)\n" \
".long ((" fixup ") - .)\n" \
".short (" type ")\n" \
".short (" data ")\n" \
".popsection\n"
#define _ASM_EXTABLE(insn, fixup) \
__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(4, 0)
#define EX_DATA_REG_ZERO_SHIFT 5
#define EX_DATA_REG_ZERO GENMASK(9, 5)
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
__stringify(EX_TYPE_UACCESS_ERR_ZERO), \
"(" \
EX_DATA_REG(ERR, err) " | " \
EX_DATA_REG(ZERO, zero) \
")")
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
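/*
 * Usage sketch (mirroring the __get_mem_asm() conversion later in this
 * series): on a fault the handler writes -EFAULT to the register bound
 * to %w0 and zeroes the one bound to %w1, then resumes at label 2.
 *
 *	asm volatile(
 *	"1:	ldtr	%w1, [%2]\n"
 *	"2:\n"
 *	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)
 *	: "+r" (err), "=&r" (x)
 *	: "r" (addr));
 */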
#define EX_DATA_REG_DATA_SHIFT 0
#define EX_DATA_REG_DATA GENMASK(4, 0)
#define EX_DATA_REG_ADDR_SHIFT 5
#define EX_DATA_REG_ADDR GENMASK(9, 5)
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
__stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD), \
"(" \
EX_DATA_REG(DATA, data) " | " \
EX_DATA_REG(ADDR, addr) \
")")
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ASM_EXTABLE_H */
@@ -3,10 +3,11 @@
#define __ASM_ASM_UACCESS_H
#include <asm/alternative-macros.h>
#include <asm/asm-extable.h>
#include <asm/assembler.h>
#include <asm/kernel-pgtable.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
#include <asm/assembler.h>
/*
* User access enabling/disabling macros.
@@ -58,6 +59,10 @@ alternative_else_nop_endif
.endm
#endif
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
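/*
 * For example, USER(9f, sttrb wzr, [x2, #-1]) assembles to:
 *
 *	9999:	sttrb	wzr, [x2, #-1]
 *		_asm_extable	9999b, 9f
 *
 * so a fault on the store resumes at the local label 9.
 */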
/*
* Generate the assembly for LDTR/STTR with exception table entries.
 * This is complicated as there are no post-increment or pair versions of the
...
@@ -14,9 +14,10 @@
#include <asm-generic/export.h>
#include <asm/asm-offsets.h>
#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
@@ -129,32 +130,6 @@ alternative_endif
.endr
.endm
/*
* Create an exception table entry for `insn`, which will branch to `fixup`
* when an unhandled fault is taken.
*/
.macro _asm_extable, insn, fixup
.pushsection __ex_table, "a"
.align 3
.long (\insn - .), (\fixup - .)
.popsection
.endm
/*
* Create an exception table entry for `insn` if `fixup` is provided. Otherwise
* do nothing.
*/
.macro _cond_extable, insn, fixup
.ifnc \fixup,
_asm_extable \insn, \fixup
.endif
.endm
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
/*
* Register aliases.
*/
...
@@ -18,10 +18,21 @@
struct exception_table_entry
{
int insn, fixup;
short type, data;
};
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->type = (b)->type; \
(b)->type = (tmp).type; \
(a)->data = (b)->data; \
(b)->data = (tmp).data; \
} while (0)
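/*
 * Why the adjustment (sketch, assuming the generic swap_ex() in
 * lib/extable.c, where delta == (char *)b - (char *)a): the absolute
 * target of an entry is (unsigned long)&ex->fixup + ex->fixup, so an
 * offset moving backwards from b to a must grow by delta to keep
 * pointing at the same address. type and data are position-independent
 * and are copied verbatim.
 */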
static inline bool in_bpf_jit(struct pt_regs *regs)
{
if (!IS_ENABLED(CONFIG_BPF_JIT))
@@ -32,16 +43,16 @@ static inline bool in_bpf_jit(struct pt_regs *regs)
}
#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs);
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs);
#else /* !CONFIG_BPF_JIT */
static inline
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs)
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
return 0;
return false;
}
#endif /* !CONFIG_BPF_JIT */
extern int fixup_exception(struct pt_regs *regs);
bool fixup_exception(struct pt_regs *regs);
#endif
@@ -25,19 +25,14 @@ do { \
" cbz %w0, 3f\n" \
" sub %w4, %w4, %w0\n" \
" cbnz %w4, 1b\n" \
" mov %w0, %w7\n" \
" mov %w0, %w6\n" \
"3:\n" \
" dmb ish\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
"+r" (loops) \
: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
: "r" (oparg), "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable_privileged(); \
} while (0)
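/*
 * Note: the EX_TYPE_UACCESS_ERR_ZERO handler itself writes -EFAULT into
 * the register named in the data field (%w0 here), which is why both the
 * "Ir" (-EFAULT) operand and the out-of-line .fixup trampoline drop out
 * of these asm blocks.
 */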
@@ -105,18 +100,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
" cbz %w3, 3f\n"
" sub %w4, %w4, %w3\n"
" cbnz %w4, 1b\n"
" mov %w0, %w8\n"
" mov %w0, %w7\n"
"3:\n"
" dmb ish\n"
"4:\n"
" .pushsection .fixup,\"ax\"\n"
"5: mov %w0, %w7\n"
" b 4b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 5b)
_ASM_EXTABLE(2b, 5b)
_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
: "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
: "memory");
uaccess_disable_privileged();
...
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
.equ .L__gpr_num_x\num, \num
.equ .L__gpr_num_w\num, \num
.endr
.equ .L__gpr_num_xzr, 31
.equ .L__gpr_num_wzr, 31
#else /* __ASSEMBLY__ */
#define __DEFINE_ASM_GPR_NUMS \
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
" .equ .L__gpr_num_x\\num, \\num\n" \
" .equ .L__gpr_num_w\\num, \\num\n" \
" .endr\n" \
" .equ .L__gpr_num_xzr, 31\n" \
" .equ .L__gpr_num_wzr, 31\n"
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GPR_NUM_H */
@@ -263,9 +263,10 @@ extern u64 __kvm_get_mdcr_el2(void);
/*
* KVM extable for unexpected exceptions.
* In the same format _asm_extable, but output to a different section so that
* it can be mapped to EL2. The KVM version is not sorted. The caller must
* ensure:
* Create a struct kvm_exception_table_entry output to a section that can be
* mapped by EL2. The table is not sorted.
*
* The caller must ensure:
* x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
* code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
*/
...
@@ -13,6 +13,8 @@
#include <linux/stringify.h>
#include <linux/kasan-tags.h>
#include <asm/gpr-num.h>
/*
* ARMv8 ARM reserves the following encoding for system registers:
* (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
@@ -1195,17 +1197,12 @@
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
.equ .L__reg_num_x\num, \num
.endr
.equ .L__reg_num_xzr, 31
.macro mrs_s, rt, sreg
__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
__emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt))
.endm
.macro msr_s, sreg, rt
__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
__emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
.endm
#else
@@ -1214,22 +1211,16 @@
#include <linux/types.h>
#include <asm/alternative.h>
#define __DEFINE_MRS_MSR_S_REGNUM \
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
" .equ .L__reg_num_x\\num, \\num\n" \
" .endr\n" \
" .equ .L__reg_num_xzr, 31\n"
#define DEFINE_MRS_S \
__DEFINE_MRS_MSR_S_REGNUM \
__DEFINE_ASM_GPR_NUMS \
" .macro mrs_s, rt, sreg\n" \
__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
__emit_inst(0xd5200000|(\\sreg)|(.L__gpr_num_\\rt)) \
" .endm\n"
#define DEFINE_MSR_S \
__DEFINE_MRS_MSR_S_REGNUM \
__DEFINE_ASM_GPR_NUMS \
" .macro msr_s, sreg, rt\n" \
__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
__emit_inst(0xd5000000|(\\sreg)|(.L__gpr_num_\\rt)) \
" .endm\n"
#define UNDEFINE_MRS_S \
...
@@ -18,6 +18,7 @@
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
@@ -70,12 +71,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
#define access_ok(addr, size) __range_ok(addr, size)
#define _ASM_EXTABLE(from, to) \
" .pushsection __ex_table, \"a\"\n" \
" .align 3\n" \
" .long (" #from " - .), (" #to " - .)\n" \
" .popsection\n"
/*
* User access enabling/disabling.
*/
@@ -260,15 +255,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup, \"ax\"\n" \
" .align 2\n" \
"3: mov %w0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
: "r" (addr))
#define __raw_get_mem(ldr, x, ptr, err) \
do { \
@@ -337,14 +326,9 @@ do { \
asm volatile( \
"1: " store " " reg "1, [%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %w0, %3\n" \
" b 2b\n" \
" .previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
: "r" (x), "r" (addr))
#define __raw_put_mem(str, x, ptr, err) \
do { \
...
@@ -53,29 +53,16 @@ static inline unsigned long find_zero(unsigned long mask)
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, tmp;
unsigned long ret;
__uaccess_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
"1: ldr %0, %3\n"
"1: ldr %0, %2\n"
"2:\n"
" .pushsection .fixup,\"ax\"\n"
" .align 2\n"
"3: bic %1, %2, #0x7\n"
" ldr %0, [%1]\n"
" and %1, %2, #0x7\n"
" lsl %1, %1, #0x3\n"
#ifndef __AARCH64EB__
" lsr %0, %0, %1\n"
#else
" lsl %0, %0, %1\n"
#endif
" b 2b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 3b)
: "=&r" (ret), "=&r" (tmp)
_ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
: "=&r" (ret)
: "r" (addr), "Q" (*(unsigned long *)addr));
__uaccess_disable_tco_async();
...
@@ -279,7 +279,7 @@ static void __init register_insn_emulation_sysctl(void)
do { \
uaccess_enable_privileged(); \
__asm__ __volatile__( \
" mov %w3, %w7\n" \
" mov %w3, %w6\n" \
"0: ldxr"B" %w2, [%4]\n" \
"1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
@@ -290,16 +290,10 @@ do { \
"2:\n" \
" mov %w1, %w2\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE_UACCESS_ERR(0b, 3b, %w0) \
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \
: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
: "r" ((unsigned long)addr), "i" (-EAGAIN), \
"i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \
: "memory"); \
uaccess_disable_privileged(); \
...
@@ -527,14 +527,9 @@ NOKPROBE_SYMBOL(do_ptrauth_fault);
"1: " insn ", %1\n" \
" mov %w0, #0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %w0, %w2\n" \
" b 2b\n" \
" .popsection\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
: "=r" (res) \
: "r" (address), "i" (-EFAULT)); \
: "r" (address)); \
uaccess_ttbr0_disable(); \
}
...
@@ -57,7 +57,7 @@
#define SBSS_ALIGN 0
#endif
#define RO_EXCEPTION_TABLE_ALIGN 8
#define RO_EXCEPTION_TABLE_ALIGN 4
#define RUNTIME_DISCARD_EXIT
#include <asm-generic/vmlinux.lds.h>
@@ -161,7 +161,6 @@ SECTIONS
IDMAP_TEXT
HIBERNATE_TEXT
TRAMP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
*(.got) /* Global offset table */
...
@@ -30,8 +30,12 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;
struct kvm_exception_table_entry {
int insn, fixup;
};
extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;
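/*
 * Note: the hyp table deliberately keeps the old two-field layout; it is
 * not sorted, and __kvm_unexpected_el2_exception() below only ever needs
 * the plain fixup branch, so the regular extable's new type/data metadata
 * has no user at EL2.
 */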
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
@@ -510,7 +514,7 @@ static inline void __kvm_unexpected_el2_exception(void)
{
extern char __guest_exit_panic[];
unsigned long addr, fixup;
struct exception_table_entry *entry, *end;
struct kvm_exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
entry = &__start___kvm_ex_table;
...
@@ -4,7 +4,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-uaccess.h>
.text
@@ -45,13 +45,11 @@ USER(9f, sttrh wzr, [x0])
USER(7f, sttrb wzr, [x2, #-1])
5: mov x0, #0
ret
SYM_FUNC_END(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"
.align 2
// Exception fixups
7: sub x0, x2, #5 // Adjust for faulting on the final byte...
8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
9: sub x0, x2, x0
ret
.previous
SYM_FUNC_END(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
@@ -60,11 +60,8 @@ SYM_FUNC_START(__arch_copy_from_user)
#include "copy_template.S"
mov x0, #0 // Nothing to copy
ret
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
// Exception fixups
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
@@ -72,4 +69,5 @@ USER(9998f, ldtrb tmp1w, [srcin])
strb tmp1w, [dst], #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
@@ -59,11 +59,8 @@ SYM_FUNC_START(__arch_copy_to_user)
#include "copy_template.S"
mov x0, #0
ret
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
// Exception fixups
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
@@ -72,4 +69,5 @@ USER(9998f, sttrb tmp1w, [dst])
add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
@@ -3,20 +3,87 @@
* Based on arch/arm/mm/extable.c
*/
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
#include <asm/asm-extable.h>
#include <asm/ptrace.h>
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
struct pt_regs *);
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
return ((unsigned long)&ex->fixup + ex->fixup);
}
static bool ex_handler_fixup(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
regs->pc = get_ex_fixup(ex);
return true;
}
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
pt_regs_write_reg(regs, reg_err, -EFAULT);
pt_regs_write_reg(regs, reg_zero, 0);
regs->pc = get_ex_fixup(ex);
return true;
}
static bool
ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
unsigned long data, addr, offset;
addr = pt_regs_read_reg(regs, reg_addr);
offset = addr & 0x7UL;
addr &= ~0x7UL;
data = *(unsigned long *)addr;
#ifndef __AARCH64EB__
data >>= 8 * offset;
#else
data <<= 8 * offset;
#endif
pt_regs_write_reg(regs, reg_data, data);
regs->pc = get_ex_fixup(ex);
return true;
}
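/*
 * Worked example (little-endian, illustrative): a fault on addr == 0x1005
 * re-reads the aligned word at 0x1000 and shifts it right by 8 * 5 = 40
 * bits, leaving bytes 5..7 in the low bits with zeroes above them: the
 * zero-padded value the unaligned load would have produced at the end of
 * a mapping.
 */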
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
const struct exception_table_entry *ex;
fixup = search_exception_tables(instruction_pointer(regs));
if (!fixup)
return 0;
ex = search_exception_tables(instruction_pointer(regs));
if (!ex)
return false;
if (in_bpf_jit(regs))
return arm64_bpf_fixup_exception(fixup, regs);
switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
}
regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
return 1;
BUG();
}
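/*
 * Caller sketch (assuming the existing arm64 fault path): the kernel
 * fault handler bails out early when an extable entry claims the fault,
 * e.g.
 *
 *	if (fixup_exception(regs))
 *		return;
 *
 * and only proceeds to die() when no entry matched.
 */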
@@ -13,6 +13,7 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
@@ -358,15 +359,15 @@ static void build_epilogue(struct jit_ctx *ctx)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs)
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
regs->regs[dst_reg] = 0;
regs->pc = (unsigned long)&ex->fixup - offset;
return 1;
return true;
}
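/*
 * Packing sketch (per the masks above): ex->fixup carries the whole BPF
 * fixup in one 32-bit field; bits [26:0] hold the distance from the fixup
 * field back to the continuation pc (hence &ex->fixup - offset above) and
 * bits [31:27] the destination register to zero.
 */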
/* For accesses to BTF pointers, add an entry to the exception table */
@@ -412,6 +413,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
ctx->exentry_idx++;
return 0;
}
...
@@ -231,6 +231,34 @@ static void sort_relative_table(char *extab_image, int image_size)
}
}
static void arm64_sort_relative_table(char *extab_image, int image_size)
{
int i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) + i, loc);
w(r(loc + 1) + i + 4, loc + 1);
/* Don't touch the fixup type or data */
i += sizeof(uint32_t) * 3;
}
qsort(extab_image, image_size / 12, 12, compare_relative_table);
i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) - i, loc);
w(r(loc + 1) - (i + 4), loc + 1);
/* Don't touch the fixup type or data */
i += sizeof(uint32_t) * 3;
}
}
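/*
 * Sketch of the scheme: adding each entry's table offset turns the
 * pc-relative insn/fixup values into monotonic "absolute" ones that
 * compare_relative_table() can qsort, and subtracting it afterwards
 * restores valid relative offsets at the entries' new positions. The
 * record size is now 12 bytes, and the trailing type/data word is left
 * untouched in both passes.
 */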
static void x86_sort_relative_table(char *extab_image, int image_size)
{
int i = 0;
@@ -343,6 +371,8 @@ static int do_file(char const *const fname, void *addr)
custom_sort = s390_sort_relative_table;
break;
case EM_AARCH64:
custom_sort = arm64_sort_relative_table;
break;
case EM_PARISC:
case EM_PPC:
case EM_PPC64:
...