Commit f14eec0a authored by Paolo Bonzini

KVM: SVM: move more vmentry code to assembly

Manipulate IF around vmload/vmsave to remove the confusing usage of
local_irq_enable where interrupts are actually disabled via GIF.
And stuff the RSB immediately without waiting for a RET to avoid
Spectre-v2 attacks.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9ef1530c
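
For orientation, a rough sketch (not part of the commit) of the ordering the IF change relies on: svm_vcpu_run() executes CLGI before calling the assembly stub and STGI after it returns, so the sti/cli added to __svm_vcpu_run() toggle RFLAGS.IF while GIF is already clear and no interrupt can actually be delivered in that window.

	/* Simplified sketch, not the actual kernel code.  The C caller
	 * (svm_vcpu_run) has already executed CLGI, so GIF=0 here and
	 * interrupt delivery stays blocked regardless of RFLAGS.IF. */
	sti			/* set IF for the vmload/vmrun/vmsave window */
	vmload %_ASM_AX		/* load extra guest state from the VMCB */
	/* ... VMRUN, #VMEXIT and the guest register save happen here ... */
	vmsave %_ASM_AX		/* store that state back after #VMEXIT */
	cli			/* clear IF again before returning to C */
	/* svm_vcpu_run() executes STGI afterwards, re-enabling delivery. */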
@@ -237,27 +237,6 @@ enum ssb_mitigation {
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
-/*
- * On VMEXIT we must ensure that no RSB predictions learned in the guest
- * can be followed in the host, by overwriting the RSB completely. Both
- * retpoline and IBRS mitigations for Spectre v2 need this; only on future
- * CPUs with IBRS_ALL *might* it be avoided.
- */
-static inline void vmexit_fill_RSB(void)
-{
-#ifdef CONFIG_RETPOLINE
-	unsigned long loops;
-
-	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-		      ALTERNATIVE("jmp 910f",
-				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-				  X86_FEATURE_RETPOLINE)
-		      "910:"
-		      : "=r" (loops), ASM_CALL_CONSTRAINT
-		      : : "memory" );
-#endif
-}
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
......
@@ -3330,13 +3330,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-	local_irq_enable();
-
 	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
@@ -3366,8 +3361,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	reload_tss(vcpu);
 
-	local_irq_disable();
-
 	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
......
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
@@ -78,6 +79,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %_ASM_AX
 
 	/* Enter guest mode */
+	sti
 1:	vmload %_ASM_AX
 	jmp 3f
 2:	cmpb $0, kvm_rebooting
@@ -99,6 +101,13 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 
 	_ASM_EXTABLE(5b, 6b)
 7:
+	cli
+
+#ifdef CONFIG_RETPOLINE
+	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
 	/* "POP" @regs to RAX. */
 	pop %_ASM_AX
......
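
FILL_RETURN_BUFFER above is the asm macro from asm/nospec-branch.h that the removed vmexit_fill_RSB() reached through __FILL_RETURN_BUFFER. As a rough illustration of the technique (simplified 64-bit flavor, not the macro's exact expansion): issue RSB_CLEAR_LOOPS (32) benign CALLs whose return targets are speculation traps, then discard the pushed return addresses.

	mov	$16, %eax		/* 16 iterations x 2 calls = 32 RSB entries */
1:	call	2f			/* each CALL pushes a benign return address */
3:	pause				/* speculation trap: reached only if a RET */
	lfence				/* mispredicts through a stale RSB entry; */
	jmp	3b			/* it spins until the speculation is squashed */
2:	call	4f
5:	pause
	lfence
	jmp	5b
4:	dec	%eax
	jnz	1b
	add	$(32 * 8), %rsp		/* pop the 32 pushed return addresses */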