Commit 95537f06 authored by Marc Zyngier's avatar Marc Zyngier

KVM: arm64: nv: Add trap forwarding for ERET and SMC

Honor the trap forwarding bits for both ERET and SMC, using a new
helper that checks for common conditions.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Co-developed-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240419102935.1935571-7-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 04ab519b
...@@ -60,6 +60,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0) ...@@ -60,6 +60,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48); return ttbr0 & ~GENMASK_ULL(63, 48);
} }
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
int kvm_init_nv_sysregs(struct kvm *kvm); int kvm_init_nv_sysregs(struct kvm *kvm);
......
...@@ -2117,6 +2117,26 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index) ...@@ -2117,6 +2117,26 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
return true; return true;
} }
/*
 * Decide whether a trap taken from the guest must be forwarded to the
 * virtual EL2, based on a trap-control bit in the guest's view of
 * HCR_EL2. If so, inject a synchronous exception into vEL2 and report
 * true; otherwise report false so the host handles the trap itself.
 */
static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
{
	/* Trap forwarding only applies to nested-virt guests */
	if (!vcpu_has_nv(vcpu))
		return false;

	/* A guest hypervisor already running at vEL2 takes the trap itself */
	if (is_hyp_ctxt(vcpu))
		return false;

	/* Forward only if the guest hypervisor asked for this trap */
	if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & control_bit))
		return false;

	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
	return true;
}
/*
 * Forward a trapped SMC instruction to the virtual EL2 when the guest
 * hypervisor has set HCR_EL2.TSC (trap SMC). Returns true if the trap
 * was forwarded (an exception was injected into vEL2), false otherwise.
 */
bool forward_smc_trap(struct kvm_vcpu *vcpu)
{
	return forward_traps(vcpu, HCR_TSC);
}
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr) static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{ {
u64 mode = spsr & PSR_MODE_MASK; u64 mode = spsr & PSR_MODE_MASK;
...@@ -2155,6 +2175,13 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu) ...@@ -2155,6 +2175,13 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
u64 spsr, elr, mode; u64 spsr, elr, mode;
bool direct_eret; bool direct_eret;
/*
* Forward this trap to the virtual EL2 if the virtual
* HCR_EL2.NV bit is set and this is coming from !EL2.
*/
if (forward_traps(vcpu, HCR_NV))
return;
/* /*
* Going through the whole put/load motions is a waste of time * Going through the whole put/load motions is a waste of time
* if this is a VHE guest hypervisor returning to its own * if this is a VHE guest hypervisor returning to its own
......
...@@ -55,6 +55,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu) ...@@ -55,6 +55,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
static int handle_smc(struct kvm_vcpu *vcpu) static int handle_smc(struct kvm_vcpu *vcpu)
{ {
/*
* Forward this trapped smc instruction to the virtual EL2 if
* the guest has asked for it.
*/
if (forward_smc_trap(vcpu))
return 1;
/* /*
* "If an SMC instruction executed at Non-secure EL1 is * "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment