Commit 8bd200d2 authored by Josh Poimboeuf, committed by Borislav Petkov

KVM: VMX: Flatten __vmx_vcpu_run()

Move the vmx_vm{enter,exit}() functionality into __vmx_vcpu_run().  This
will make it easier to do the spec_ctrl handling before the first RET.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
parent 8faea26e
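
For orientation before reading the diff: __vmx_vcpu_run() is the assembly routine that performs the actual guest entry/exit transition. The sketch below is illustrative only; the prototype mirrors the @vmx/@regs/@launched parameters referenced in the diff and the 0-on-VM-Exit / 1-on-VM-Fail return convention noted in its comments, while the caller shown is an assumption, not a quote of vmx.c.

/*
 * Illustrative sketch (not kernel source): the contract the assembly
 * below implements.
 */
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);

static void example_vcpu_enter_exit(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
	/*
	 * Returns 0 on a normal VM-Exit and 1 on VM-Fail (see "Clear RAX to
	 * indicate VM-Exit" and ".Lvmfail" below), so the result can be
	 * stored directly as the VM-Fail flag.
	 */
	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   vmx->loaded_vmcs->launched);
}
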
@@ -30,68 +30,6 @@
.section .noinstr.text, "ax"
/**
* vmx_vmenter - VM-Enter the current loaded VMCS
*
* %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
*
* Returns:
* %RFLAGS.CF is set on VM-Fail Invalid
* %RFLAGS.ZF is set on VM-Fail Valid
* %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
*
* Note that VMRESUME/VMLAUNCH fall-through and return directly if
* they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
* to vmx_vmexit.
*/
SYM_FUNC_START_LOCAL(vmx_vmenter)
/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
je 2f
1: vmresume
RET
2: vmlaunch
RET
3: cmpb $0, kvm_rebooting
je 4f
RET
4: ud2
_ASM_EXTABLE(1b, 3b)
_ASM_EXTABLE(2b, 3b)
SYM_FUNC_END(vmx_vmenter)
/**
* vmx_vmexit - Handle a VMX VM-Exit
*
* Returns:
* %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
*
* This is vmx_vmenter's partner in crime. On a VM-Exit, control will jump
* here after hardware loads the host's state, i.e. this is the destination
* referred to by VMCS.HOST_RIP.
*/
SYM_FUNC_START(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
/* Preserve guest's RAX, it's used to stuff the RSB. */
push %_ASM_AX
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
/* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
or $1, %_ASM_AX
pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
RET
SYM_FUNC_END(vmx_vmexit)
/**
* __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
* @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
@@ -124,8 +62,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Copy @launched to BL, _ASM_ARG3 is volatile. */
mov %_ASM_ARG3B, %bl
/* Adjust RSP to account for the CALL to vmx_vmenter(). */
lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
lea (%_ASM_SP), %_ASM_ARG2
call vmx_update_host_rsp
/* Load @regs to RAX. */
@@ -154,11 +91,37 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
/* Enter guest mode */
call vmx_vmenter
/* Check EFLAGS.ZF from 'testb' above */
je .Lvmlaunch
/*
* After a successful VMRESUME/VMLAUNCH, control flow "magically"
* resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
* So this isn't a typical function and objtool needs to be told to
* save the unwind state here and restore it below.
*/
UNWIND_HINT_SAVE
/*
* If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
* the 'vmx_vmexit' label below.
*/
.Lvmresume:
vmresume
jmp .Lvmfail
.Lvmlaunch:
vmlaunch
jmp .Lvmfail
_ASM_EXTABLE(.Lvmresume, .Lfixup)
_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
/* Jump on VM-Fail. */
jbe 2f
SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
UNWIND_HINT_RESTORE
ENDBR
/* Temporarily save guest's RAX. */
push %_ASM_AX
@@ -185,9 +148,13 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov %r15, VCPU_R15(%_ASM_AX)
#endif
/* IMPORTANT: RSB must be stuffed before the first return. */
FILL_RETURN_BUFFER %_ASM_BX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
xor %eax, %eax
.Lclear_regs:
/*
* Clear all general purpose registers except RSP and RAX to prevent
* speculative use of the guest's values, even those that are reloaded
@@ -197,7 +164,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
* free. RSP and RAX are exempt as RSP is restored by hardware during
* VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
*/
1: xor %ecx, %ecx
xor %ecx, %ecx
xor %edx, %edx
xor %ebx, %ebx
xor %ebp, %ebp
@@ -216,8 +183,8 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* "POP" @regs. */
add $WORD_SIZE, %_ASM_SP
pop %_ASM_BX
pop %_ASM_BX
#ifdef CONFIG_X86_64
pop %r12
pop %r13
@@ -230,9 +197,15 @@ SYM_FUNC_START(__vmx_vcpu_run)
pop %_ASM_BP
RET
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
2: mov $1, %eax
jmp 1b
.Lfixup:
cmpb $0, kvm_rebooting
jne .Lvmfail
ud2
.Lvmfail:
/* VM-Fail: set return value to 1 */
mov $1, %eax
jmp .Lclear_regs
SYM_FUNC_END(__vmx_vcpu_run)
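
The "spec_ctrl handling before the first RET" mentioned in the commit message boils down to restoring the host's IA32_SPEC_CTRL MSR (alongside the RSB stuffing done by FILL_RETURN_BUFFER above) while execution is still inside this single flat routine, i.e. before any RET whose target could be predicted from guest-influenced state. A rough, hypothetical C sketch of that kind of restore follows; the helper name and its parameters are assumptions, only MSR_IA32_SPEC_CTRL and the ordering requirement come from the context.

#include <linux/types.h>
#include <asm/msr-index.h>
#include <asm/msr.h>

/*
 * Hypothetical sketch, not the kernel's implementation: restore the
 * host's IA32_SPEC_CTRL value after VM-Exit, before the first RET.
 */
static void example_restore_host_spec_ctrl(u64 guest_val, u64 host_val)
{
	/* Avoid the WRMSR when the guest and host values already match. */
	if (guest_val != host_val)
		native_wrmsrl(MSR_IA32_SPEC_CTRL, host_val);
}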