Commit 0cfc85b8 authored by Oliver Upton

KVM: arm64: nv: Load guest FP state for ZCR_EL2 trap

Round out the ZCR_EL2 gymnastics by loading SVE state in the fast path
when the guest hypervisor tries to access SVE state.
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240620164653.1130714-11-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 493da2b1
@@ -371,6 +371,10 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 		if (guest_hyp_fpsimd_traps_enabled(vcpu))
 			return false;
 		break;
+	case ESR_ELx_EC_SYS64:
+		if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
+			return false;
+		fallthrough;
 	case ESR_ELx_EC_SVE:
 		if (!sve_guest)
 			return false;
@@ -288,11 +288,38 @@ static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }
 
+static bool kvm_hyp_handle_zcr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+
+	if (!vcpu_has_nv(vcpu))
+		return false;
+
+	if (sysreg != SYS_ZCR_EL2)
+		return false;
+
+	if (guest_owns_fp_regs())
+		return false;
+
+	/*
+	 * ZCR_EL2 traps are handled in the slow path, with the expectation
+	 * that the guest's FP context has already been loaded onto the CPU.
+	 *
+	 * Load the guest's FP context and unconditionally forward to the
+	 * slow path for handling (i.e. return false).
+	 */
+	kvm_hyp_handle_fpsimd(vcpu, exit_code);
+	return false;
+}
+
 static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
 		return true;
 
+	if (kvm_hyp_handle_zcr_el2(vcpu, exit_code))
+		return true;
+
 	return kvm_hyp_handle_sysreg(vcpu, exit_code);
 }
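For readers tracing the control flow, below is a small, self-contained model of the fast-path dispatch this patch adds. It is not kernel code: every type and helper in it (toy_vcpu, toy_handle_zcr_el2, toy_load_guest_fp, TOY_SYS_ZCR_EL2) is an illustrative stand-in, and only the order of the checks mirrors kvm_hyp_handle_zcr_el2() in the hunk above.

/*
 * Toy model of the fast-path dispatch added by this patch.
 * All structs and helpers here are illustrative stand-ins, not KVM APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool has_nv;			/* vCPU has nested virt enabled      */
	bool owns_fp_regs;		/* guest FP/SVE state already on CPU */
	unsigned int trapped_sysreg;	/* register named in the trap ESR    */
};

#define TOY_SYS_ZCR_EL2	0x1c92	/* placeholder encoding, not the real one */

/* Stand-in for kvm_hyp_handle_fpsimd(): load the guest's FP/SVE state. */
static void toy_load_guest_fp(struct toy_vcpu *vcpu)
{
	vcpu->owns_fp_regs = true;
	printf("fast path: guest FP/SVE state loaded\n");
}

/*
 * Mirrors the decision order of kvm_hyp_handle_zcr_el2() above:
 * returns true only if the exit was fully handled in the fast path,
 * which for ZCR_EL2 traps is never the case.
 */
static bool toy_handle_zcr_el2(struct toy_vcpu *vcpu)
{
	if (!vcpu->has_nv)
		return false;
	if (vcpu->trapped_sysreg != TOY_SYS_ZCR_EL2)
		return false;
	if (vcpu->owns_fp_regs)
		return false;

	/* Load FP state now, then let the slow path emulate the access. */
	toy_load_guest_fp(vcpu);
	return false;
}

int main(void)
{
	struct toy_vcpu vcpu = {
		.has_nv = true,
		.owns_fp_regs = false,
		.trapped_sysreg = TOY_SYS_ZCR_EL2,
	};

	if (!toy_handle_zcr_el2(&vcpu))
		printf("slow path: emulate the ZCR_EL2 access "
		       "(FP regs owned by guest: %s)\n",
		       vcpu.owns_fp_regs ? "yes" : "no");
	return 0;
}

Built with a standard C compiler, the program prints the fast-path FP/SVE load followed by the slow-path emulation message, i.e. the "load the guest's FP context, then unconditionally return false" behaviour described in the patch's comment block.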