Commit 1270dad3 authored by Oliver Upton

Merge branch kvm-arm64/el2-kcfi into kvmarm/next

* kvm-arm64/el2-kcfi:
  : kCFI support in the EL2 hypervisor, courtesy of Pierre-Clément Tosi
  :
  : Enable the use of CONFIG_CFI_CLANG (kCFI) for hardening indirect
  : branches in the EL2 hypervisor. Unlike kernel support for the feature,
  : CFI failures at EL2 are always fatal.
  KVM: arm64: nVHE: Support CONFIG_CFI_CLANG at EL2
  KVM: arm64: Introduce print_nvhe_hyp_panic helper
  arm64: Introduce esr_brk_comment, esr_is_cfi_brk
  KVM: arm64: VHE: Mark __hyp_call_panic __noreturn
  KVM: arm64: nVHE: gen-hyprel: Skip R_AARCH64_ABS32
  KVM: arm64: nVHE: Simplify invalid_host_el2_vect
  KVM: arm64: Fix __pkvm_init_switch_pgd call ABI
  KVM: arm64: Fix clobbered ELR in sync abort/SError
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents 377d0e5d eca4ba5b
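
For context, a conceptual model of what the kCFI instrumentation checks. This is a standalone sketch with invented names and hash values; the real checks are emitted by Clang, which stores a hash of each function's prototype just before its entry point and verifies it at every indirect call site:

/*
 * Conceptual sketch only. The struct, type_hash value, and checked_call()
 * are invented for illustration; real kCFI instrumentation is
 * compiler-generated and reads the hash at (entry point - 4).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cfi_func {
	uint32_t type_hash;	/* hash of the function's prototype */
	int (*entry)(int);	/* the actual code */
};

static int good_callee(int x) { return x + 1; }

static int checked_call(struct cfi_func *f, uint32_t expected, int arg)
{
	if (f->type_hash != expected) {
		/* At EL2 this is always fatal; the host decodes the BRK. */
		fprintf(stderr, "CFI failure: hash mismatch\n");
		abort();
	}
	return f->entry(arg);
}

int main(void)
{
	struct cfi_func f = { .type_hash = 0x5eed5eed, .entry = good_callee };

	/* Caller and callee agree on the prototype hash: call proceeds. */
	printf("%d\n", checked_call(&f, 0x5eed5eed, 41));

	/* A mismatched hash models a corrupted or retyped pointer. */
	checked_call(&f, 0xdeadbeef, 41);	/* aborts */
	return 0;
}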
arch/arm64/include/asm/esr.h
@@ -380,6 +380,11 @@
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 
+static inline unsigned long esr_brk_comment(unsigned long esr)
+{
+	return esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+}
+
 static inline bool esr_is_data_abort(unsigned long esr)
 {
 	const unsigned long ec = ESR_ELx_EC(esr);
@@ -387,6 +392,12 @@ static inline bool esr_is_data_abort(unsigned long esr)
 	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
 }
 
+static inline bool esr_is_cfi_brk(unsigned long esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
+	       (esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
+}
+
 static inline bool esr_fsc_is_translation_fault(unsigned long esr)
 {
 	/* Translation fault, level -1 */
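
Taken out of the kernel, the new helpers behave as in this standalone sketch. The constant values below are assumptions for illustration (the real definitions live in asm/esr.h and asm/brk-imm.h); only the masking logic mirrors the helpers above:

#include <stdbool.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT		26
#define ESR_ELx_EC_MASK			(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)			(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC_BRK64		0x3CUL	/* BRK in AArch64 state */
#define ESR_ELx_BRK64_ISS_COMMENT_MASK	0xFFFFUL
#define CFI_BRK_IMM_BASE		0x8000UL /* assumed value */
#define CFI_BRK_IMM_MASK		0x03FFUL /* assumed value */

static unsigned long esr_brk_comment(unsigned long esr)
{
	return esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
}

static bool esr_is_cfi_brk(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
	       (esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
}

int main(void)
{
	/* Fake ESR: EC = BRK64, comment = CFI base plus register fields. */
	unsigned long esr = (ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | 0x8005UL;

	printf("comment=%#lx cfi=%d\n", esr_brk_comment(esr), esr_is_cfi_brk(esr));
	return 0;
}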
arch/arm64/include/asm/kvm_hyp.h
@@ -124,8 +124,8 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 #endif
 
 #ifdef __KVM_NVHE_HYPERVISOR__
-void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
-			    phys_addr_t pgd, void *sp, void *cont_fn);
+void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
+			    void (*fn)(void));
 int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 		unsigned long *per_cpu_base, u32 hyp_va_bits);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
arch/arm64/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ int main(void)
   DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_cpu_context, regs));
+  DEFINE(CPU_ELR_EL2,		offsetof(struct kvm_cpu_context, sys_regs[ELR_EL2]));
   DEFINE(CPU_RGSR_EL1,	offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
   DEFINE(CPU_GCR_EL1,		offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));
   DEFINE(CPU_APIAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
arch/arm64/kernel/debug-monitors.c
@@ -312,9 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned long esr)
 	 * entirely not preemptible, and we can use rcu list safely here.
 	 */
 	list_for_each_entry_rcu(hook, list, node) {
-		unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
-
-		if ((comment & ~hook->mask) == hook->imm)
+		if ((esr_brk_comment(esr) & ~hook->mask) == hook->imm)
 			fn = hook->fn;
 	}
arch/arm64/kernel/traps.c
@@ -1105,8 +1105,6 @@ static struct break_hook ubsan_break_hook = {
 };
 #endif
 
-#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)
-
 /*
  * Initial handler for AArch64 BRK exceptions
  * This handler only used until debug_traps_init().
@@ -1115,15 +1113,15 @@ int __init early_brk64(unsigned long addr, unsigned long esr,
 		       struct pt_regs *regs)
 {
 #ifdef CONFIG_CFI_CLANG
-	if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE)
+	if (esr_is_cfi_brk(esr))
 		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
 #endif
 #ifdef CONFIG_KASAN_SW_TAGS
-	if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
+	if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
 		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
 #endif
 #ifdef CONFIG_UBSAN_TRAP
-	if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
+	if ((esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM)
 		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
 #endif
 	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
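
The hooks walked by call_break_hook() above all follow one shape. Here is a kernel-only sketch of a registered BRK handler, following the pattern of the kasan/ubsan hooks in this file; the immediate 0x42 and the handler itself are invented for illustration:

/* Kernel-only sketch; not a standalone program. */
#include <linux/printk.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/traps.h>

static int my_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_info("BRK comment %#lx\n", esr_brk_comment(esr));
	/* Step past the BRK so execution continues. */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook my_break_hook = {
	.fn	= my_brk_handler,
	.imm	= 0x42,		/* assumed-free immediate, for illustration */
};

/* Registered once debug_traps_init() has run: */
/* register_kernel_break_hook(&my_break_hook); */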
arch/arm64/kvm/handle_exit.c
@@ -411,6 +411,20 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
 
+static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
+{
+	kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
+		(void *)(panic_addr + kaslr_offset()));
+}
+
+static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
+{
+	print_nvhe_hyp_panic("CFI failure", panic_addr);
+
+	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
+		kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
+}
+
 void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 					      u64 elr_virt, u64 elr_phys,
 					      u64 par, uintptr_t vcpu,
@@ -423,7 +437,7 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
 		kvm_err("Invalid host exception to nVHE hyp!\n");
 	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
-		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
+		   esr_brk_comment(esr) == BUG_BRK_IMM) {
 		const char *file = NULL;
 		unsigned int line = 0;
 
@@ -439,11 +453,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
 		if (file)
 			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
 		else
-			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
-				(void *)(panic_addr + kaslr_offset()));
+			print_nvhe_hyp_panic("BUG", panic_addr);
+	} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
+		kvm_nvhe_report_cfi_failure(panic_addr);
 	} else {
-		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
-			(void *)(panic_addr + kaslr_offset()));
+		print_nvhe_hyp_panic("panic", panic_addr);
 	}
 
 	/* Dump the nVHE hypervisor backtrace */
arch/arm64/kvm/hyp/entry.S
@@ -83,6 +83,14 @@ alternative_else_nop_endif
 	eret
 	sb
 
+SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
+	// x2-x29,lr: vcpu regs
+	// vcpu x0-x1 on the stack
+
+	adr_this_cpu x0, kvm_hyp_ctxt, x1
+	ldr	x0, [x0, #CPU_ELR_EL2]
+	msr	elr_el2, x0
+
 SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
 	// x2-x29,lr: vcpu regs
 	// vcpu x0-x1 on the stack
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -693,7 +693,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 static inline void __kvm_unexpected_el2_exception(void)
 {
-	extern char __guest_exit_panic[];
+	extern char __guest_exit_restore_elr_and_panic[];
 	unsigned long addr, fixup;
 	struct kvm_exception_table_entry *entry, *end;
 	unsigned long elr_el2 = read_sysreg(elr_el2);
@@ -715,7 +715,8 @@ static inline void __kvm_unexpected_el2_exception(void)
 	}
 
 	/* Trigger a panic after restoring the hyp context. */
-	write_sysreg(__guest_exit_panic, elr_el2);
+	this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
+	write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
 }
 
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
arch/arm64/kvm/hyp/nvhe/Makefile
@@ -89,9 +89,9 @@ quiet_cmd_hyprel = HYPREL $@
 quiet_cmd_hypcopy = HYPCOPY $@
       cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
 
-# Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
-# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
-KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
+# Remove ftrace and Shadow Call Stack CFLAGS.
+# This is equivalent to the 'notrace' and '__noscs' annotations.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
 
 # Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
 # when profile optimization is applied. gen-hyprel does not support SHT_REL and
 # causes a build failure. Remove profile optimization flags.
arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
@@ -50,6 +50,9 @@
 #ifndef R_AARCH64_ABS64
 #define R_AARCH64_ABS64			257
 #endif
+#ifndef R_AARCH64_ABS32
+#define R_AARCH64_ABS32			258
+#endif
 #ifndef R_AARCH64_PREL64
 #define R_AARCH64_PREL64		260
 #endif
@@ -383,6 +386,9 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
 		case R_AARCH64_ABS64:
 			emit_rela_abs64(rela, sh_orig_name);
 			break;
+		/* Allow 32-bit absolute relocation, for kCFI type hashes. */
+		case R_AARCH64_ABS32:
+			break;
 		/* Allow position-relative data relocations. */
 		case R_AARCH64_PREL64:
 		case R_AARCH64_PREL32:
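
The new case exists because kCFI places each function's 32-bit type hash in the word just before its entry point, and the reference to the absolute __kcfi_typeid_<name> symbol is what shows up as an R_AARCH64_ABS32 relocation in the EL2 object. A rough sketch of what SYM_TYPED_FUNC_START(name) expands to, simplified from include/linux/cfi_types.h:

// Rough expansion under CONFIG_CFI_CLANG (simplified; details may vary):
	.globl	name
	.align	2
	.4byte	__kcfi_typeid_name	// 32-bit absolute reloc: R_AARCH64_ABS32
name:
	// function body follows; indirect callers check the hash at name - 4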
arch/arm64/kvm/hyp/nvhe/host.S
@@ -197,12 +197,6 @@ SYM_FUNC_END(__host_hvc)
 	sub	x0, sp, x0		// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0		// sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
-	/* If a guest is loaded, panic out of it. */
-	stp	x0, x1, [sp, #-16]!
-	get_loaded_vcpu x0, x1
-	cbnz	x0, __guest_exit_panic
-	add	sp, sp, #16
-
 	/*
 	 * The panic may not be clean if the exception is taken before the host
 	 * context has been saved by __host_exit or after the hyp context has
arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -5,6 +5,7 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/cfi_types.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -265,33 +266,38 @@ alternative_else_nop_endif
 
 SYM_CODE_END(__kvm_handle_stub_hvc)
 
-SYM_FUNC_START(__pkvm_init_switch_pgd)
+/*
+ * void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
+ *			       void (*fn)(void));
+ *
+ * SYM_TYPED_FUNC_START() allows C to call this ID-mapped function indirectly
+ * using a physical pointer without triggering a kCFI failure.
+ */
+SYM_TYPED_FUNC_START(__pkvm_init_switch_pgd)
 	/* Turn the MMU off */
 	pre_disable_mmu_workaround
-	mrs	x2, sctlr_el2
-	bic	x3, x2, #SCTLR_ELx_M
-	msr	sctlr_el2, x3
+	mrs	x3, sctlr_el2
+	bic	x4, x3, #SCTLR_ELx_M
+	msr	sctlr_el2, x4
 	isb
 
 	tlbi	alle2
 
 	/* Install the new pgtables */
-	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
-	phys_to_ttbr x4, x3
+	phys_to_ttbr x5, x0
 alternative_if ARM64_HAS_CNP
-	orr	x4, x4, #TTBR_CNP_BIT
+	orr	x5, x5, #TTBR_CNP_BIT
 alternative_else_nop_endif
-	msr	ttbr0_el2, x4
+	msr	ttbr0_el2, x5
 
 	/* Set the new stack pointer */
-	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
-	mov	sp, x0
+	mov	sp, x1
 
 	/* And turn the MMU back on! */
 	dsb	nsh
 	isb
-	set_sctlr_el2	x2
-	ret	x1
+	set_sctlr_el2	x3
+	ret	x2
SYM_FUNC_END(__pkvm_init_switch_pgd)
 
 .popsection
arch/arm64/kvm/hyp/nvhe/setup.c
@@ -339,7 +339,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 {
 	struct kvm_nvhe_init_params *params;
 	void *virt = hyp_phys_to_virt(phys);
-	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
+	typeof(__pkvm_init_switch_pgd) *fn;
 	int ret;
 
 	BUG_ON(kvm_check_pvm_sysreg_table());
@@ -363,7 +363,7 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 	/* Jump in the idmap page to switch to the new page-tables */
 	params = this_cpu_ptr(&kvm_init_params);
 	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
-	fn(__hyp_pa(params), __pkvm_init_finalise);
+	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);
 
 	unreachable();
 }
arch/arm64/kvm/hyp/vhe/switch.c
@@ -437,7 +437,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
+static void __noreturn __hyp_call_panic(u64 spsr, u64 elr, u64 par)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;
@@ -462,7 +462,6 @@ void __noreturn hyp_panic(void)
 	u64 par = read_sysreg_par();
 
 	__hyp_call_panic(spsr, elr, par);
-	unreachable();
 }
 
 asmlinkage void kvm_unexpected_el2_exception(void)