Commit 66f7b72e authored by Julian Stecklina, committed by Gleb Natapov

KVM: x86: Make register state after reset conform to specification

VMX now behaves like SVM with respect to FPU initialization: the code has
been moved to the generic code path. General-purpose registers are now
cleared on reset and INIT, and the SVM code properly initializes EDX.
Signed-off-by: Julian Stecklina <jsteckli@os.inf.tu-dresden.de>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 2b3c5cbc
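
Background for the EDX change: after RESET or INIT the x86 architecture specifies that EDX holds the processor signature (family/model/stepping), which is the same value that CPUID leaf 01H returns in EAX; the SVM hunk below therefore derives the reset value of RDX from kvm_cpuid() with EAX = 1. The short userspace sketch that follows is not kernel code and not part of the patch; it only illustrates the specified value, assuming a GCC/clang toolchain that provides the <cpuid.h> helper __get_cpuid().

/*
 * Illustration only (userspace, not part of the patch): print the value
 * that CPUID.01H returns in EAX, i.e. the processor signature a guest is
 * expected to find in EDX immediately after RESET or INIT.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 1: EAX = family/model/stepping. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("CPUID.01H:EAX (expected EDX after reset) = 0x%08x\n", eax);
	return 0;
}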
arch/x86/kvm/cpuid.c
@@ -661,6 +661,7 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
 	} else
 		*eax = *ebx = *ecx = *edx = 0;
 }
+EXPORT_SYMBOL_GPL(kvm_cpuid);
 
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
...
arch/x86/kvm/svm.c
@@ -20,6 +20,7 @@
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "cpuid.h"
 
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
@@ -1193,6 +1194,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 dummy;
+	u32 eax = 1;
 
 	init_vmcb(svm);
 
@@ -1201,8 +1204,9 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
 		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
 	}
-	vcpu->arch.regs_avail = ~0;
-	vcpu->arch.regs_dirty = ~0;
+
+	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
 
 	return 0;
 }
@@ -1259,10 +1263,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->asid_generation = 0;
 	init_vmcb(svm);
 
-	err = fx_init(&svm->vcpu);
-	if (err)
-		goto free_page4;
-
 	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
@@ -1271,8 +1271,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	return &svm->vcpu;
 
-free_page4:
-	__free_page(hsave_page);
 free_page3:
 	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
 free_page2:
...
arch/x86/kvm/vmx.c
@@ -3918,8 +3918,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;
 
-	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-
 	vmx->rmode.vm86_active = 0;
 
 	vmx->soft_vnmi_blocked = 0;
@@ -3931,10 +3929,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		msr |= MSR_IA32_APICBASE_BSP;
 	kvm_set_apic_base(&vmx->vcpu, msr);
 
-	ret = fx_init(&vmx->vcpu);
-	if (ret != 0)
-		goto out;
-
 	vmx_segment_cache_clear(vmx);
 
 	seg_setup(VCPU_SREG_CS);
@@ -3975,7 +3969,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		kvm_rip_write(vcpu, 0xfff0);
 	else
 		kvm_rip_write(vcpu, 0);
-	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
 	vmcs_writel(GUEST_GDTR_BASE, 0);
 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
@@ -4025,7 +4018,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	/* HACK: Don't enable emulation on guest boot/reset */
 	vmx->emulation_required = 0;
 
-out:
 	return ret;
 }
...
arch/x86/kvm/x86.c
@@ -6461,6 +6461,10 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_pmu_reset(vcpu);
 
+	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+	vcpu->arch.regs_avail = ~0;
+	vcpu->arch.regs_dirty = ~0;
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -6629,11 +6633,17 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	r = fx_init(vcpu);
+	if (r)
+		goto fail_free_wbinvd_dirty_mask;
+
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 
 	return 0;
+fail_free_wbinvd_dirty_mask:
+	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
 fail_free_lapic:
...