Commit 3d5bdae8 authored by Liran Alon's avatar Liran Alon Committed by Paolo Bonzini

KVM: nVMX: Use correct VPID02 when emulating L1 INVVPID

In case L0 didn't allocate vmx->nested.vpid02 for L2,
vmcs02->vpid is set to vmx->vpid.
Consider this case when emulating L1 INVVPID in L0.
Reviewed-by: Nikita Leshenko <nikita.leshchenko@oracle.com>
Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1438921c
@@ -9003,6 +9003,13 @@ static int handle_invept(struct kvm_vcpu *vcpu)
	return kvm_skip_emulated_instruction(vcpu);
}
/*
 * Return the VPID to use for L2 (vmcs02): the dedicated nested vpid02 when
 * L0 allocated one, otherwise fall back to vmx->vpid, which is what
 * vmcs02->vpid was set to in that case (see the commit message above).
 */
static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -9014,6 +9021,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
		u64 vpid;
		u64 gla;
	} operand;
u16 vpid02;
	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
@@ -9053,6 +9061,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
		return kvm_skip_emulated_instruction(vcpu);
	}
vpid02 = nested_get_vpid02(vcpu);
	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
@@ -9061,12 +9070,11 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);
		}
-		if (cpu_has_vmx_invvpid_individual_addr() &&
-		    vmx->nested.vpid02) {
+		if (cpu_has_vmx_invvpid_individual_addr()) {
			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
-				vmx->nested.vpid02, operand.gla);
+				vpid02, operand.gla);
		} else
-			__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+			__vmx_flush_tlb(vcpu, vpid02, true);
		break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
@@ -9075,10 +9083,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);
		}
-		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+		__vmx_flush_tlb(vcpu, vpid02, true);
		break;
	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+		__vmx_flush_tlb(vcpu, vpid02, true);
		break;
	default:
		WARN_ON_ONCE(1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment