Commit 5e1746d6 authored by Nadav Har'El, committed by Avi Kivity

KVM: nVMX: Allow setting the VMXE bit in CR4

This patch allows the guest to enable the VMXE bit in CR4, which is a
prerequisite to running VMXON.

Whether to allow setting the VMXE bit now depends on the architecture (svm
or vmx), so the check has moved into kvm_x86_ops->set_cr4(). This function
now returns an int: if kvm_x86_ops->set_cr4() returns 1, __kvm_set_cr4()
will also return 1, and this will cause kvm_set_cr4() to throw a #GP.

Turning on the VMXE bit is allowed only when the nested VMX feature is
enabled, and turning it off is forbidden after a vmxon.
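
The gate on the VMX side is nested_vmx_allowed(), introduced by earlier
patches in the nested VMX series. A rough sketch of what it checks
(assuming the series' "nested" module parameter and a guest-CPUID helper;
not part of this patch):

	static int nested_vmx_allowed(struct kvm_vcpu *vcpu)
	{
		/* Require both the kvm-intel "nested" module parameter
		 * and the VMX feature bit in the guest's CPUID. */
		return nested && guest_cpuid_has_vmx(vcpu);
	}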
Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent ec378aee
arch/x86/include/asm/kvm_host.h
@@ -555,7 +555,7 @@ struct kvm_x86_ops {
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
-	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
arch/x86/kvm/svm.c
@@ -1496,11 +1496,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	update_cr0_intercept(svm);
 }
 
-static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
+	if (cr4 & X86_CR4_VMXE)
+		return 1;
+
 	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
 		svm_flush_tlb(vcpu);
 
@@ -1510,6 +1513,7 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+	return 0;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
arch/x86/kvm/vmx.c
@@ -2121,7 +2121,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 		  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
@@ -2219,11 +2219,23 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
+	if (cr4 & X86_CR4_VMXE) {
+		/*
+		 * To use VMXON (and later other VMX instructions), a guest
+		 * must first be able to turn on cr4.VMXE (see handle_vmon()).
+		 * So basically the check on whether to allow nested VMX
+		 * is here.
+		 */
+		if (!nested_vmx_allowed(vcpu))
+			return 1;
+	} else if (to_vmx(vcpu)->nested.vmxon)
+		return 1;
+
 	vcpu->arch.cr4 = cr4;
 	if (enable_ept) {
 		if (!is_paging(vcpu)) {
@@ -2236,6 +2248,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, hw_cr4);
+	return 0;
 }
 
 static void vmx_get_segment(struct kvm_vcpu *vcpu,
arch/x86/kvm/x86.c
@@ -615,11 +615,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 				   kvm_read_cr3(vcpu)))
 		return 1;
 
-	if (cr4 & X86_CR4_VMXE)
+	if (kvm_x86_ops->set_cr4(vcpu, cr4))
 		return 1;
 
-	kvm_x86_ops->set_cr4(vcpu, cr4);
-
 	if ((cr4 ^ old_cr4) & pdptr_bits)
 		kvm_mmu_reset_context(vcpu);
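
From inside the guest the change is directly observable: a write to CR4
with VMXE set now succeeds when the host enables nested VMX and faults
with #GP otherwise. A hypothetical guest-kernel probe (not part of this
patch) might look like:

	static void probe_cr4_vmxe(void)
	{
		unsigned long cr4 = native_read_cr4();

		/* Faults with #GP unless the hypervisor exposes nested VMX */
		native_write_cr4(cr4 | X86_CR4_VMXE);
	}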