Commit 6e3bfbb2 authored by Andrew Scull, committed by Marc Zyngier

KVM: arm64: nVHE: Use separate vector for the host

The host is treated differently from the guests when an exception is
taken, so introduce a separate vector that is specialized for the host.
This also allows the nVHE-specific code to move out of hyp-entry.S and
into nvhe/host.S.

The host is only expected to make HVC calls; anything else is considered
invalid and results in a panic.

Hyp initialization is now passed the vector that is used for the host,
and it is swapped for the guest vector during the context switch.
Signed-off-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200915104643.2543892-7-ascull@google.com
parent a0e47952
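
For orientation before the diff, here is a minimal, non-authoritative C sketch of the vector swap described in the last paragraph of the commit message. The write_sysreg() calls, the kvm_hyp_vector per-CPU variable and the __kvm_hyp_host_vector symbol are taken from the hunks below; the wrapper function names and include choices are invented for illustration.

/*
 * Minimal sketch, assuming nVHE hyp context where the kvm_hyp_vector
 * per-CPU variable is in scope. In the patch itself the two writes live
 * in __activate_traps() and __deactivate_traps() (see the switch.c hunk
 * at the end of this diff); the function names below are hypothetical.
 */
#include <asm/kvm_asm.h>	/* __hyp_this_cpu_read() */
#include <asm/sysreg.h>		/* write_sysreg() */

/* Entering a guest: point VBAR_EL2 at this CPU's guest vector. */
static inline void example_use_guest_vector(void)
{
	write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
}

/* Returning to the host: reinstate the host-only vector. */
static inline void example_use_host_vector(void)
{
	extern char __kvm_hyp_host_vector[];

	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}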
arch/arm64/include/asm/kvm_asm.h
@@ -111,8 +111,10 @@ struct kvm_vcpu;
 struct kvm_s2_mmu;
 
 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
 #define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
arch/arm64/kernel/image-vars.h
@@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
+KVM_NVHE_ALIAS(kvm_hyp_vector);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
 /* Kernel constant needed to compute idmap addresses. */
arch/arm64/kvm/arm.c
@@ -1277,7 +1277,7 @@ static void cpu_init_hyp_mode(void)
 
 	pgd_ptr = kvm_mmu_get_httbr();
 	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
-	vector_ptr = __this_cpu_read(kvm_hyp_vector);
+	vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
 
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
@@ -1542,6 +1542,7 @@ static int init_hyp_mode(void)
 
 	for_each_possible_cpu(cpu) {
 		struct kvm_host_data *cpu_data;
+		unsigned long *vector;
 
 		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
 		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
@@ -1550,6 +1551,14 @@ static int init_hyp_mode(void)
 			kvm_err("Cannot map host CPU state: %d\n", err);
 			goto out_err;
 		}
+
+		vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
+		err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
+
+		if (err) {
+			kvm_err("Cannot map hyp guest vector address\n");
+			goto out_err;
+		}
 	}
 
 	err = hyp_map_aux_data();
arch/arm64/kvm/hyp/hyp-entry.S
@@ -12,7 +12,6 @@
 #include <asm/cpufeature.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
 .macro save_caller_saved_regs_vect
@@ -41,20 +40,6 @@
 
 	.text
 
-.macro do_el2_call
-	/*
-	 * Shuffle the parameters before calling the function
-	 * pointed to in x0. Assumes parameters in x[1,2,3].
-	 */
-	str	lr, [sp, #-16]!
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
-	ldr	lr, [sp], #16
-.endm
-
 el1_sync:				// Guest trapped into EL2
 	mrs	x0, esr_el2
@@ -63,44 +48,6 @@ el1_sync:				// Guest trapped into EL2
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
 
-#ifdef __KVM_NVHE_HYPERVISOR__
-	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
-	cbnz	x1, el1_hvc_guest	// called HVC
-
-	/* Here, we're pretty sure the host called HVC. */
-	ldp	x0, x1, [sp], #16
-
-	/* Check for a stub HVC call */
-	cmp	x0, #HVC_STUB_HCALL_NR
-	b.hs	1f
-
-	/*
-	 * Compute the idmap address of __kvm_handle_stub_hvc and
-	 * jump there. Since we use kimage_voffset, do not use the
-	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
-	 * (by loading it from the constant pool).
-	 *
-	 * Preserve x0-x4, which may contain stub parameters.
-	 */
-	ldr	x5, =__kvm_handle_stub_hvc
-	ldr_l	x6, kimage_voffset
-
-	/* x5 = __pa(x5) */
-	sub	x5, x5, x6
-	br	x5
-
-1:
-	/*
-	 * Perform the EL2 call
-	 */
-	kern_hyp_va	x0
-	do_el2_call
-
-	eret
-	sb
-#endif /* __KVM_NVHE_HYPERVISOR__ */
-
 el1_hvc_guest:
 	/*
 	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
 	 * The workaround has already been applied on the host,
@@ -198,18 +145,6 @@ el2_error:
 	eret
 	sb
 
-#ifdef __KVM_NVHE_HYPERVISOR__
-SYM_FUNC_START(__hyp_do_panic)
-	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
-		      PSR_MODE_EL1h)
-	msr	spsr_el2, lr
-	ldr	lr, =panic
-	msr	elr_el2, lr
-	eret
-	sb
-SYM_FUNC_END(__hyp_do_panic)
-#endif
-
 .macro invalid_vector	label, target = hyp_panic
 	.align	2
 SYM_CODE_START(\label)
@@ -222,7 +157,6 @@ SYM_CODE_END(\label)
 	invalid_vector	el2t_irq_invalid
 	invalid_vector	el2t_fiq_invalid
 	invalid_vector	el2t_error_invalid
-	invalid_vector	el2h_sync_invalid
 	invalid_vector	el2h_irq_invalid
 	invalid_vector	el2h_fiq_invalid
 	invalid_vector	el1_fiq_invalid
arch/arm64/kvm/hyp/nvhe/Makefile
@@ -6,7 +6,7 @@
 asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__
 
-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o
+obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
 	 ../fpsimd.o ../hyp-entry.o
arch/arm64/kvm/hyp/nvhe/host.S (new file)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

SYM_FUNC_START(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	esb
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ldp	x0, x1, [sp], #16
	b.ne	hyp_panic

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	kern_hyp_va	x0
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16

	eret
	sb
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_vect
	.align 7
	b	hyp_panic
.endm

/*
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */

	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_vect			// Synchronous EL2t
	invalid_host_vect			// IRQ EL2t
	invalid_host_vect			// FIQ EL2t
	invalid_host_vect			// Error EL2t

	invalid_host_vect			// Synchronous EL2h
	invalid_host_vect			// IRQ EL2h
	invalid_host_vect			// FIQ EL2h
	invalid_host_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_vect			// IRQ 64-bit EL1
	invalid_host_vect			// FIQ 64-bit EL1
	invalid_host_vect			// Error 64-bit EL1

	invalid_host_vect			// Synchronous 32-bit EL1
	invalid_host_vect			// IRQ 32-bit EL1
	invalid_host_vect			// FIQ 32-bit EL1
	invalid_host_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)
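
Elsewhere in the patch, this host vector is installed from two places: cpu_init_hyp_mode() now hands it to __kvm_hyp_init as vector_ptr (arm.c hunk above), and __deactivate_traps() writes it back to VBAR_EL2 on the way back to the host after running a guest, mirroring the guest-vector write added to __activate_traps() (switch.c hunk below).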
arch/arm64/kvm/hyp/nvhe/switch.c
@@ -42,6 +42,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	}
 
 	write_sysreg(val, cptr_el2);
+	write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
 
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -60,6 +61,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
+	extern char __kvm_hyp_host_vector[];
 	u64 mdcr_el2;
 
 	___deactivate_traps(vcpu);
@@ -91,6 +93,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(mdcr_el2, mdcr_el2);
 	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }
 
 static void __load_host_stage2(void)