Commit c9afc58c authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Pass "launched" directly to the vCPU-run asm blob

...and remove struct vcpu_vmx's temporary __launched variable.

Eliminating __launched is a bonus; the real motivation is to get to the
point where the only reference to struct vcpu_vmx in the asm code is
to vcpu.arch.regs, which will simplify moving the blob to a proper asm
file.  Note that this also means the approach is deliberately different
from what is used in nested_vmx_check_vmentry_hw().

Use BL as it is a callee-save register in both 32-bit and 64-bit ABIs,
i.e. it can't be modified by vmx_update_host_rsp(), to avoid having to
temporarily save/restore the launched flag.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c09b03eb
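
To illustrate the point about BL in the message above, here is a stand-alone user-space sketch (hypothetical, x86-64 Linux with GCC or Clang; the names clobber_caller_saved and still_set are invented for the example and are not kernel code). It passes a flag into an extended-asm block via the "b" constraint so the value lives in RBX/BL. Because the ABI requires a callee to preserve RBX, a function called from inside the asm block may trash the caller-saved registers without disturbing the flag, which is the property that lets the vCPU-run blob test %bl after calling vmx_update_host_rsp().

/*
 * Hypothetical sketch (x86-64 Linux, GCC/Clang) -- not kernel code.
 * The flag is handed to the asm block via the "b" constraint, i.e. it
 * lives in RBX/BL.  The callee is free to trash caller-saved registers,
 * but the ABI obliges it to preserve RBX, so the flag is still valid
 * when the asm block tests it after the call.
 */
#include <stdio.h>

__attribute__((noinline)) void clobber_caller_saved(void)
{
	/* Deliberately wipe a few caller-saved registers. */
	asm volatile("xor %%eax, %%eax\n\t"
		     "xor %%ecx, %%ecx\n\t"
		     "xor %%edx, %%edx"
		     ::: "eax", "ecx", "edx");
}

int main(void)
{
	unsigned char launched = 1;	/* stand-in for loaded_vmcs->launched */
	unsigned char still_set;

	asm volatile("call clobber_caller_saved\n\t"	/* must preserve RBX  */
		     "cmpb $0, %%bl\n\t"		/* flag survived call */
		     "setne %0"
		     : "=r"(still_set)
		     : "b"(launched)
		     /* everything the callee is allowed to clobber */
		     : "rax", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11", "cc", "memory");

	printf("launched flag after the call: %u\n", still_set);
	return 0;
}

Built with e.g. gcc -O2, this should print 1; a caller-saved register such as AL would need an explicit save/restore around the call to give the same guarantee.
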
@@ -6373,8 +6373,6 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 
 static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-	vmx->__launched = vmx->loaded_vmcs->launched;
-
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
 
@@ -6395,7 +6393,8 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		"mov (%%" _ASM_SP "), %%" _ASM_CX " \n\t"
 
 		/* Check if vmlaunch or vmresume is needed */
-		"cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
+		"cmpb $0, %%bl \n\t"
+
 		/* Load guest registers. Don't clobber flags. */
 		"mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
 		"mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
@@ -6471,7 +6470,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		"xor %%edi, %%edi \n\t"
 		"xor %%ebp, %%ebp \n\t"
 		"pop %%" _ASM_BP " \n\t"
-	      : ASM_CALL_CONSTRAINT,
+	      : ASM_CALL_CONSTRAINT, "=b"((int){0}),
 #ifdef CONFIG_X86_64
 		"=D"((int){0})
 	      : "D"(vmx),
@@ -6479,7 +6478,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		"=a"((int){0})
 	      : "a"(vmx),
 #endif
-		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
+		"b"(vmx->loaded_vmcs->launched),
 		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
 		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
 		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
@@ -6501,10 +6500,10 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
-	      , "rax", "rbx", "rcx", "rdx", "rsi"
+	      , "rax", "rcx", "rdx", "rsi"
 	      , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-	      , "ebx", "ecx", "edx", "edi", "esi"
+	      , "ecx", "edx", "edi", "esi"
 #endif
 	      );
 
@@ -208,7 +208,7 @@ struct vcpu_vmx {
 	struct loaded_vmcs vmcs01;
 	struct loaded_vmcs *loaded_vmcs;
 	struct loaded_vmcs *loaded_cpu_state;
-	bool __launched; /* temporary, used in vmx_vcpu_run */
+
 	struct msr_autoload {
 		struct vmx_msrs guest;
 		struct vmx_msrs host;