Commit 4151bb63 authored by Marc Zyngier

KVM: arm64: Fix SMPRI_EL1/TPIDR2_EL0 trapping on VHE

The trapping of SMPRI_EL1 and TPIDR2_EL0 currently only really
works on nVHE, as only this mode uses the fine-grained trapping
that controls these two registers.

Move the trapping enable/disable code into
__{de,}activate_traps_common(), allowing it to be called when it
actually matters on VHE, and remove the flipping of EL2 control
for TPIDR2_EL0, which only affects the host access of this
register.
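
As context for the hunks below: the replacement code uses the kernel's
sysreg_clear_set_s() helper instead of open-coded read/modify/write
sequences. A minimal sketch of its semantics, paraphrased from the
definition in arch/arm64/include/asm/sysreg.h (an approximation, not
the authoritative source):

/*
 * Sketch of sysreg_clear_set_s(): clear the 'clear' bits, then set the
 * 'set' bits of a system register, skipping the write when the value
 * is unchanged.
 */
#define sysreg_clear_set_s(sysreg, clear, set) do {			\
	u64 __scs_val = read_sysreg_s(sysreg);				\
	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
	if (__scs_new != __scs_val)					\
		write_sysreg_s(__scs_new, sysreg);			\
} while (0)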

Fixes: 861262ab ("KVM: arm64: Handle SME host state when running guests")
Reported-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/86bkpqer4z.wl-maz@kernel.org
parent 7a2726ec
@@ -87,6 +87,17 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+
+	if (cpus_have_final_cap(ARM64_SME)) {
+		sysreg_clear_set_s(SYS_HFGRTR_EL2,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK,
+				   0);
+		sysreg_clear_set_s(SYS_HFGWTR_EL2,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK,
+				   0);
+	}
 }

 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -96,6 +107,15 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(0, hstr_el2);
 	if (kvm_arm_support_pmu_v3())
 		write_sysreg(0, pmuserenr_el0);
+
+	if (cpus_have_final_cap(ARM64_SME)) {
+		sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK);
+		sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
+				   HFGxTR_EL2_nSMPRI_EL1_MASK |
+				   HFGxTR_EL2_nTPIDR2_EL0_MASK);
+	}
 }

 static inline void ___activate_traps(struct kvm_vcpu *vcpu)
@@ -55,18 +55,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(val, cptr_el2);
 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

-	if (cpus_have_final_cap(ARM64_SME)) {
-		val = read_sysreg_s(SYS_HFGRTR_EL2);
-		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
-			 HFGxTR_EL2_nSMPRI_EL1_MASK);
-		write_sysreg_s(val, SYS_HFGRTR_EL2);
-
-		val = read_sysreg_s(SYS_HFGWTR_EL2);
-		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
-			 HFGxTR_EL2_nSMPRI_EL1_MASK);
-		write_sysreg_s(val, SYS_HFGWTR_EL2);
-	}
-
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -110,20 +98,6 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

-	if (cpus_have_final_cap(ARM64_SME)) {
-		u64 val;
-
-		val = read_sysreg_s(SYS_HFGRTR_EL2);
-		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
-			HFGxTR_EL2_nSMPRI_EL1_MASK;
-		write_sysreg_s(val, SYS_HFGRTR_EL2);
-
-		val = read_sysreg_s(SYS_HFGWTR_EL2);
-		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
-			HFGxTR_EL2_nSMPRI_EL1_MASK;
-		write_sysreg_s(val, SYS_HFGWTR_EL2);
-	}
-
 	cptr = CPTR_EL2_DEFAULT;
 	if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
 		cptr |= CPTR_EL2_TZ;
@@ -63,10 +63,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 		__activate_traps_fpsimd32(vcpu);
 	}

-	if (cpus_have_final_cap(ARM64_SME))
-		write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
-			     sctlr_el2);
-
 	write_sysreg(val, cpacr_el1);

 	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -88,10 +84,6 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	 */
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

-	if (cpus_have_final_cap(ARM64_SME))
-		write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
-			     sctlr_el2);
-
 	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);

 	if (!arm64_kernel_unmapped_at_el0())
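
A note on polarity when reading the hunks above: the nSMPRI_EL1 and
nTPIDR2_EL0 fields in HFGRTR_EL2/HFGWTR_EL2 are negative trap enables,
so clearing them (the activate path) traps guest accesses to EL2,
while setting them (the deactivate path) lets host accesses run
untrapped. The sketch below restates those two transitions; the helper
names are hypothetical and for illustration only:

/* Hypothetical helpers restating the transitions the patch moves
 * into __{de,}activate_traps_common(); not kernel code. */
static inline void __sme_fgt_trap_guest(void)
{
	/* An nXXX fine-grained trap bit at 0 means "trap to EL2". */
	sysreg_clear_set_s(SYS_HFGRTR_EL2,
			   HFGxTR_EL2_nSMPRI_EL1_MASK |
			   HFGxTR_EL2_nTPIDR2_EL0_MASK, 0);
	sysreg_clear_set_s(SYS_HFGWTR_EL2,
			   HFGxTR_EL2_nSMPRI_EL1_MASK |
			   HFGxTR_EL2_nTPIDR2_EL0_MASK, 0);
}

static inline void __sme_fgt_untrap_host(void)
{
	/* An nXXX bit at 1 means "do not trap". */
	sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
			   HFGxTR_EL2_nSMPRI_EL1_MASK |
			   HFGxTR_EL2_nTPIDR2_EL0_MASK);
	sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
			   HFGxTR_EL2_nSMPRI_EL1_MASK |
			   HFGxTR_EL2_nTPIDR2_EL0_MASK);
}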