Commit e72341c5 authored by Christoffer Dall, committed by Marc Zyngier

KVM: arm/arm64: Introduce vcpu_el1_is_32bit

We have numerous checks around that check whether HCR_EL2 has the RW bit
set to figure out if we're running an AArch64 or AArch32 VM.  In some
cases, directly checking the RW bit (given its unintuitive name) is a
bit confusing, and that's not going to improve as we move logic around
for the following patches that optimize KVM on AArch64 hosts with VHE.

Therefore, introduce a helper, vcpu_el1_is_32bit, and replace existing
direct checks of HCR_EL2.RW with the helper.
Reviewed-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent bc192cee
...@@ -45,6 +45,11 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu); ...@@ -45,6 +45,11 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
/*
 * Return true when the guest's EL1 runs in AArch32 state, i.e. when
 * HCR_EL2.RW is clear for this vcpu.  Spelled out as an explicit
 * comparison so readers need not untangle the unintuitive RW name.
 */
static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.hcr_el2 & HCR_RW) == 0;
}
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{ {
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
...@@ -65,7 +70,7 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) ...@@ -65,7 +70,7 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
* For now this is conditional, since no AArch32 feature regs * For now this is conditional, since no AArch32 feature regs
* are currently virtualised. * are currently virtualised.
*/ */
if (vcpu->arch.hcr_el2 & HCR_RW) if (!vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID3; vcpu->arch.hcr_el2 |= HCR_TID3;
} }
......
...@@ -74,7 +74,7 @@ static hyp_alternate_select(__activate_traps_arch, ...@@ -74,7 +74,7 @@ static hyp_alternate_select(__activate_traps_arch,
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{ {
u64 val; u64 hcr = vcpu->arch.hcr_el2;
/* /*
* We are about to set CPTR_EL2.TFP to trap all floating point * We are about to set CPTR_EL2.TFP to trap all floating point
...@@ -85,17 +85,16 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) ...@@ -85,17 +85,16 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
* If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
* it will cause an exception. * it will cause an exception.
*/ */
val = vcpu->arch.hcr_el2; if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
if (!(val & HCR_RW) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2); write_sysreg(1 << 30, fpexc32_el2);
isb(); isb();
} }
write_sysreg(val, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE)) if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
write_sysreg(hcr, hcr_el2);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2); write_sysreg(1 << 15, hstr_el2);
/* /*
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
/* Yes, this does nothing, on purpose */ /* Yes, this does nothing, on purpose */
...@@ -147,7 +148,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) ...@@ -147,7 +148,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{ {
u64 *spsr, *sysreg; u64 *spsr, *sysreg;
if (read_sysreg(hcr_el2) & HCR_RW) if (!vcpu_el1_is_32bit(vcpu))
return; return;
spsr = vcpu->arch.ctxt.gp_regs.spsr; spsr = vcpu->arch.ctxt.gp_regs.spsr;
...@@ -172,7 +173,7 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) ...@@ -172,7 +173,7 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{ {
u64 *spsr, *sysreg; u64 *spsr, *sysreg;
if (read_sysreg(hcr_el2) & HCR_RW) if (!vcpu_el1_is_32bit(vcpu))
return; return;
spsr = vcpu->arch.ctxt.gp_regs.spsr; spsr = vcpu->arch.ctxt.gp_regs.spsr;
......
...@@ -128,7 +128,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) ...@@ -128,7 +128,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
*/ */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{ {
if (!(vcpu->arch.hcr_el2 & HCR_RW)) if (vcpu_el1_is_32bit(vcpu))
kvm_inject_dabt32(vcpu, addr); kvm_inject_dabt32(vcpu, addr);
else else
inject_abt64(vcpu, false, addr); inject_abt64(vcpu, false, addr);
...@@ -144,7 +144,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) ...@@ -144,7 +144,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
*/ */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{ {
if (!(vcpu->arch.hcr_el2 & HCR_RW)) if (vcpu_el1_is_32bit(vcpu))
kvm_inject_pabt32(vcpu, addr); kvm_inject_pabt32(vcpu, addr);
else else
inject_abt64(vcpu, true, addr); inject_abt64(vcpu, true, addr);
...@@ -158,7 +158,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) ...@@ -158,7 +158,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
*/ */
void kvm_inject_undefined(struct kvm_vcpu *vcpu) void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{ {
if (!(vcpu->arch.hcr_el2 & HCR_RW)) if (vcpu_el1_is_32bit(vcpu))
kvm_inject_undef32(vcpu); kvm_inject_undef32(vcpu);
else else
inject_undef64(vcpu); inject_undef64(vcpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment