Commit c09b03eb authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Update VMCS.HOST_RSP via helper C function

Providing a helper function to update HOST_RSP makes the code visibly easier
to read, and more importantly (for the future) eliminates two arguments to
the VM-Enter assembly blob.  Reducing the number of arguments to the asm
blob is for all intents and purposes a prerequisite to moving the code
to a proper assembly routine.  It's not truly mandatory, but it greatly
simplifies the future code, and the cost of the extra CALL+RET is
negligible in the grand scheme.

Note that although _ASM_ARG[1-3] can be used in the inline asm itself, the
input/output constraints need to be manually defined.  gcc will actually
compile with _ASM_ARG[1-3] specified as constraints, but what it actually
ends up doing with the bogus constraint is unknown.
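
To illustrate the note, here is a minimal userspace sketch of the pattern,
compilable with gcc on x86.  MY_ASM_ARG1 and bump_first_arg are invented
stand-ins for this example (the kernel's _ASM_ARG1 similarly expands to
"rdi" on x86-64 and "eax" on 32-bit): the macro is usable inside the asm
template, but the matching constraint ("+D" vs. "+a") still has to be
spelled out by hand per architecture.

    #ifdef __x86_64__
    #define MY_ASM_ARG1 "rdi"   /* stand-in for the kernel's _ASM_ARG1 */
    #else
    #define MY_ASM_ARG1 "eax"
    #endif

    static inline unsigned long bump_first_arg(unsigned long val)
    {
            /* The macro is fine inside the template... */
            asm("add $1, %%" MY_ASM_ARG1
    #ifdef __x86_64__
                /* ...but the constraint must be written manually. */
                : "+D"(val)
    #else
                : "+a"(val)
    #endif
                :
                : "cc");
            return val;
    }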
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 47e97c09
@@ -6363,15 +6363,18 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 {
-	unsigned long evmcs_rsp;
+	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
+		vmx->loaded_vmcs->host_state.rsp = host_rsp;
+		vmcs_writel(HOST_RSP, host_rsp);
+	}
+}
 
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+{
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
-	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
-		(unsigned long)&current_evmcs->host_rsp : 0;
-
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
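
For intuition, a minimal userspace toy model of the new helper's caching
behavior (all names below are invented for illustration; a counter stands in
for the real VMWRITE instruction):

    #include <stdio.h>

    static unsigned long cached_rsp;   /* models loaded_vmcs->host_state.rsp */
    static unsigned int  vmwrites;     /* models executed VMWRITEs */

    static void update_host_rsp(unsigned long host_rsp)
    {
            if (host_rsp != cached_rsp) {   /* skip the "VMWRITE" if unchanged */
                    cached_rsp = host_rsp;
                    vmwrites++;
            }
    }

    int main(void)
    {
            for (int i = 0; i < 1000; i++)  /* 1000 "VM-Enters", same stack */
                    update_host_rsp(0x7ffff0000000ul);
            printf("%u vmwrite(s)\n", vmwrites);    /* prints "1 vmwrite(s)" */
            return 0;
    }

Since the host stack pointer for a given vCPU rarely moves between runs, the
VMWRITE is skipped on virtually every VM-Enter.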
@@ -6382,21 +6385,14 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		/* Store host registers */
 		"push %%" _ASM_BP " \n\t"
 		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* placeholder for guest RCX */
-		"push %%" _ASM_CX " \n\t"
-		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-		"cmp %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"
-		"je 1f \n\t"
-		"mov %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"
-		/* Avoid VMWRITE when Enlightened VMCS is in use */
-		"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
-		"jz 2f \n\t"
-		"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
-		"jmp 1f \n\t"
-		"2: \n\t"
-		"mov $%c[HOST_RSP], %%" _ASM_DX " \n\t"
-		__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
-		"1: \n\t"
-		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
+		"push %%" _ASM_ARG1 " \n\t"
+
+		/* Adjust RSP to account for the CALL to vmx_vmenter(). */
+		"lea -%c[wordsize](%%" _ASM_SP "), %%" _ASM_ARG2 " \n\t"
+		"call vmx_update_host_rsp \n\t"
+
+		/* Load the vcpu_vmx pointer to RCX. */
+		"mov (%%" _ASM_SP "), %%" _ASM_CX " \n\t"
 
 		/* Check if vmlaunch or vmresume is needed */
 		"cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
@@ -6475,11 +6471,16 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		"xor %%edi, %%edi \n\t"
 		"xor %%ebp, %%ebp \n\t"
 		"pop %%" _ASM_BP " \n\t"
-	      : ASM_CALL_CONSTRAINT, "=D"((int){0}), "=S"((int){0})
-	      : "c"(vmx), "D"(&vmx->loaded_vmcs->host_state.rsp), "S"(evmcs_rsp),
+	      : ASM_CALL_CONSTRAINT,
+#ifdef CONFIG_X86_64
+		"=D"((int){0})
+	      : "D"(vmx),
+#else
+		"=a"((int){0})
+	      : "a"(vmx),
+#endif
 		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
 		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
-		[HOST_RSP]"i"(HOST_RSP),
 		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
 		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
 		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
@@ -6500,10 +6501,10 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
-	      , "rax", "rbx", "rdx"
+	      , "rax", "rbx", "rcx", "rdx", "rsi"
 	      , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-	      , "eax", "ebx", "edx"
+	      , "ebx", "ecx", "edx", "edi", "esi"
 #endif
 	      );
...