Commit bcad1c39 authored by Paolo Bonzini, committed by Ben Hutchings

KVM: nVMX: fix lifetime issues for vmcs02

commit 4fa7734c upstream.

free_nested needs the loaded_vmcs to be valid if it is a vmcs02, in
order to detach it from the shadow vmcs.  However, this is not
available anymore after commit 26a865f4 (KVM: VMX: fix use after
free of vmx->loaded_vmcs, 2014-01-03).

Revert that patch, and fix its problem by forcing a vmcs01 as the
active VMCS before freeing all the nested VMX state.
Reported-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Tested-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent ac5ec090
@@ -4999,22 +4999,27 @@ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
/* /*
* Free all VMCSs saved for this vcpu, except the one pointed by * Free all VMCSs saved for this vcpu, except the one pointed by
* vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
* currently used, if running L2), and vmcs01 when running L2. * must be &vmx->vmcs01.
*/ */
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{ {
struct vmcs02_list *item, *n; struct vmcs02_list *item, *n;
WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
if (vmx->loaded_vmcs != &item->vmcs02) /*
free_loaded_vmcs(&item->vmcs02); * Something will leak if the above WARN triggers. Better than
* a use-after-free.
*/
if (vmx->loaded_vmcs == &item->vmcs02)
continue;
free_loaded_vmcs(&item->vmcs02);
list_del(&item->list); list_del(&item->list);
kfree(item); kfree(item);
vmx->nested.vmcs02_num--;
} }
vmx->nested.vmcs02_num = 0;
if (vmx->loaded_vmcs != &vmx->vmcs01)
free_loaded_vmcs(&vmx->vmcs01);
} }
/* /*
@@ -6307,13 +6312,31 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#undef R #undef R
#undef Q #undef Q
/*
 * Force vmcs01 (the L1 VMCS) to be the vcpu's active/loaded VMCS.
 *
 * Per the commit message above, this must run before free_nested() so that
 * the nested VMX state (including any vmcs02) can be torn down while a
 * valid, non-nested VMCS is loaded — avoiding the use-after-free that
 * commit 26a865f4 tried to fix.
 */
static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
/* Nothing to do if vmcs01 is already the loaded VMCS. */
if (vmx->loaded_vmcs == &vmx->vmcs01)
return;
/* get_cpu()/put_cpu() pin us to one CPU across the put/load pair. */
cpu = get_cpu();
vmx->loaded_vmcs = &vmx->vmcs01;
/* Unload the current VMCS, then reload state from vmcs01 on this CPU. */
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
put_cpu();
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu) static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx); free_vpid(vmx);
free_loaded_vmcs(vmx->loaded_vmcs); leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
free_nested(vmx); free_nested(vmx);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs); kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu); kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx); kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -7059,18 +7082,12 @@ void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
leave_guest_mode(vcpu); leave_guest_mode(vcpu);
prepare_vmcs12(vcpu, vmcs12); prepare_vmcs12(vcpu, vmcs12);
cpu = get_cpu(); vmx_load_vmcs01(vcpu);
vmx->loaded_vmcs = &vmx->vmcs01;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
put_cpu();
/* if no vmcs02 cache requested, remove the one we used */ /* if no vmcs02 cache requested, remove the one we used */
if (VMCS02_POOL_SIZE == 0) if (VMCS02_POOL_SIZE == 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.