Commit 0067df41 authored by James Morse, committed by Catalin Marinas

KVM: arm64: Handle RAS SErrors from EL2 on guest exit

We expect to have firmware-first handling of RAS SErrors, with errors
notified via an APEI method. For systems without firmware-first, add
some minimal handling to KVM.

There are two ways KVM can take an SError due to a guest, either of
which may be a RAS error: we exit the guest due to an SError routed to
EL2 by HCR_EL2.AMO, or we take an SError from EL2 when we unmask
PSTATE.A from __guest_exit.
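
For reference, the second path is reported to the host by tagging the
exit code. Below is a minimal sketch of the tagging helpers, modelled
on the ARM_EXIT_WITH_SERROR_BIT macros in
arch/arm64/include/asm/kvm_asm.h (exact definitions may vary between
kernel versions):

/*
 * Sketch of the exit-code tagging assumed in this patch; the top bit
 * of the exit code carries "an SError is pending", the remaining bits
 * carry the exception class as before.
 */
#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x)     (!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT)))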

The current SError from EL2 code unmasks SError and tries to fence any
pending SError into a single instruction window. It then leaves SError
unmasked.

With the v8.2 RAS Extensions we may take an SError for a 'corrected'
error, but KVM is only able to handle SErrors from EL2 if they occur
during this single-instruction window...

The RAS Extensions give us a new instruction to synchronise and
consume SErrors. The RAS Extensions document (ARM DDI0587),
'2.4.1 ESB and Unrecoverable errors' describes ESB as synchronising
SError interrupts generated by 'instructions, translation table walks,
hardware updates to the translation tables, and instruction fetches on
the same PE'. This makes ESB equivalent to KVM's existing
'dsb, mrs-daifclr, isb' sequence.

Use the alternatives framework to synchronise and consume any SError
using ESB instead of unmasking and taking the SError. Set
ARM_EXIT_WITH_SERROR_BIT in the exit_code so that we can restart the
vcpu if it turns out this SError has no impact on the vcpu.
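
The decision whether the vcpu can simply be restarted is made by
kvm_handle_guest_serror(), added by the parent commit (3368bd80). A
hedged sketch of that logic, assuming the arm64_is_ras_serror() and
arm64_is_fatal_ras_serror() helpers from the arm64 RAS series:

/*
 * Hedged sketch, not the verbatim helper: a corrected/restartable RAS
 * SError had no impact on the vcpu, so do nothing and let the caller
 * restart it. Anything else is forwarded to the guest as a virtual
 * SError.
 */
void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}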
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 3368bd80
arch/arm64/include/asm/kvm_emulate.h
@@ -176,6 +176,11 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
 
+static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.disr_el1;
+}
+
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
...
arch/arm64/include/asm/kvm_host.h
@@ -90,6 +90,7 @@ struct kvm_vcpu_fault_info {
 	u32 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
 	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
+	u64 disr_el1;		/* Deferred [SError] Status Register */
 };
 
 /*
...
arch/arm64/kernel/asm-offsets.c
@@ -132,6 +132,7 @@ int main(void)
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
+  DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
...
arch/arm64/kvm/handle_exit.c
@@ -23,6 +23,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
@@ -249,7 +250,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			*vcpu_pc(vcpu) -= adj;
 		}
 
-		kvm_inject_vabt(vcpu);
 		return 1;
 	}
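
(The next hunk continues in handle_exit.c.) For systems without the
RAS Extensions, kvm_inject_vabt() remains the fallback. Roughly, it
pends a virtual SError for the guest by setting HCR_EL2.VSE; a
simplified, hypothetical rendering (the real code lives in
arch/arm64/kvm/inject_fault.c):

/*
 * Hypothetical simplification of kvm_inject_vabt(): pend a virtual
 * SError by setting HCR_EL2.VSE, which the guest takes on its next
 * entry. The real implementation also sets an impdef syndrome via
 * VSESR_EL2 on RAS-capable systems.
 */
static void inject_virtual_serror(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_VSE;
}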
@@ -286,6 +286,18 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
+	if (ARM_SERROR_PENDING(exception_index)) {
+		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
+			u64 disr = kvm_vcpu_get_disr(vcpu);
+
+			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
+		} else {
+			kvm_inject_vabt(vcpu);
+		}
+
+		return;
+	}
+
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
...
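
disr_to_esr() comes from the arm64 RAS support in asm/exception.h
(hence the new include above). Approximately, it converts the deferred
syndrome recorded in DISR_EL1 into an ESR_ELx-format SError syndrome:

/*
 * Approximate shape of disr_to_esr(): build an SError ESR from the
 * deferred syndrome, taking the architected or implementation-defined
 * ISS depending on DISR_EL1.IDS. Mask names are from the arm64 RAS
 * series and may differ in detail.
 */
static inline u32 disr_to_esr(u64 disr)
{
	unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if ((disr & DISR_EL1_IDS) == 0)
		esr |= (disr & DISR_EL1_ESR_MASK);
	else
		esr |= (disr & ESR_ELx_ISS_MASK);

	return esr;
}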
arch/arm64/kvm/hyp/entry.S
@@ -124,6 +124,17 @@ ENTRY(__guest_exit)
 	// Now restore the host regs
 	restore_callee_saved_regs x2
 
+alternative_if ARM64_HAS_RAS_EXTN
+	// If we have the RAS extensions we can consume a pending error
+	// without an unmask-SError and isb.
+	esb
+	mrs_s	x2, SYS_DISR_EL1
+	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
+	cbz	x2, 1f
+	msr_s	SYS_DISR_EL1, xzr
+	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
+1:	ret
+alternative_else
 	// If we have a pending asynchronous abort, now is the
 	// time to find out. From your VAXorcist book, page 666:
 	// "Threaten me not, oh Evil one! For I speak with
@@ -134,7 +145,9 @@ ENTRY(__guest_exit)
 	mov	x5, x0
 
 	dsb	sy		// Synchronize against in-flight ld/st
+	nop
 	msr	daifclr, #4	// Unmask aborts
+alternative_endif
 
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
...
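
For context, the tagged exit code produced by __guest_exit is consumed
on the return from the hyp call. A hedged sketch of that flow in
kvm_arch_vcpu_ioctl_run() (virt/kvm/arm/arm.c at the time; the wrapper
function name here is hypothetical):

/*
 * Hedged sketch of the caller side: the exit code returned by
 * __guest_exit, possibly tagged with ARM_EXIT_WITH_SERROR_BIT, reaches
 * handle_exit_early() while still in the guest-exit critical section,
 * before the vcpu can be preempted.
 */
static int vcpu_run_once(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	/* Enter the guest via the hyp trampoline */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

	/* Exit types that need handling before we can be preempted */
	handle_exit_early(vcpu, run, ret);

	/* ... interrupts re-enabled, host context restored ... */

	return handle_exit(vcpu, run, ret);
}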