Commit 6906e06d authored by Krish Sadhukhan, committed by Paolo Bonzini

KVM: nSVM: Add missing checks for reserved bits to svm_set_nested_state()

The path for SVM_SET_NESTED_STATE needs to have the same checks for the CPU
registers as we have in the VMRUN path for a nested guest. This patch adds
those missing checks to svm_set_nested_state().
Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Message-Id: <20201006190654.32305-3-krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c08f390a
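Before the diff, a quick illustration: below is a minimal, self-contained sketch (plain user-space C, not the kernel code) of the register checks that the patch consolidates into nested_vmcb_valid_sregs() and applies on both the VMRUN and SVM_SET_NESTED_STATE paths. The struct and macro names are illustrative stand-ins for the kernel's definitions, and the CR3/CR4/EFER/DR6/DR7 reserved-bit helpers are deliberately left out.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's register bit definitions. */
#define CR0_PE    (1ULL << 0)
#define CR0_NW    (1ULL << 29)
#define CR0_CD    (1ULL << 30)
#define CR0_PG    (1ULL << 31)
#define CR4_PAE   (1ULL << 5)
#define EFER_LME  (1ULL << 8)
#define EFER_SVME (1ULL << 12)

struct saved_regs {
        uint64_t efer, cr0, cr3, cr4, dr6, dr7;
};

/*
 * Mirror of the common checks: EFER.SVME must be set, CR0 bits 63:32
 * must be clear, CR0.CD=0 && CR0.NW=1 is illegal, and when long-mode
 * paging is enabled (EFER.LME && CR0.PG) both CR4.PAE and CR0.PE must
 * be set.  CR3/CR4/EFER/DR6/DR7 reserved-bit validation is elided.
 */
static bool saved_regs_valid(const struct saved_regs *r)
{
        if (!(r->efer & EFER_SVME))
                return false;

        if (r->cr0 & ~0xffffffffULL)
                return false;

        if (!(r->cr0 & CR0_CD) && (r->cr0 & CR0_NW))
                return false;

        if ((r->efer & EFER_LME) && (r->cr0 & CR0_PG)) {
                if (!(r->cr4 & CR4_PAE) || !(r->cr0 & CR0_PE))
                        return false;
        }

        return true;
}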
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -246,29 +246,51 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
+				      struct vmcb_save_area *save)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
-	bool vmcb12_lma;
 
-	if ((vmcb12->save.efer & EFER_SVME) == 0)
+	/*
+	 * These checks are also performed by KVM_SET_SREGS,
+	 * except that EFER.LMA is not checked by SVM against
+	 * CR0.PG && EFER.LME.
+	 */
+	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
+		if (!(save->cr4 & X86_CR4_PAE) || !(save->cr0 & X86_CR0_PE) ||
+		    kvm_vcpu_is_illegal_gpa(vcpu, save->cr3))
+			return false;
+	}
+
+	return kvm_is_valid_cr4(&svm->vcpu, save->cr4);
+}
+
+/* Common checks that apply to both L1 and L2 state.  */
+static bool nested_vmcb_valid_sregs(struct vcpu_svm *svm,
+				    struct vmcb_save_area *save)
+{
+	if (!(save->efer & EFER_SVME))
 		return false;
 
-	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
+	if (((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
+	    (save->cr0 & ~0xffffffffULL))
 		return false;
 
-	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
+	if (!kvm_dr6_valid(save->dr6) || !kvm_dr7_valid(save->dr7))
 		return false;
 
-	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
-
-	if (vmcb12_lma) {
-		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
-		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-		    kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
-			return false;
-	}
-	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+	if (!nested_vmcb_check_cr3_cr4(svm, save))
+		return false;
+
+	if (!kvm_valid_efer(&svm->vcpu, save->efer))
 		return false;
 
+	return true;
+}
+
+static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+{
+	if (!nested_vmcb_valid_sregs(svm, &vmcb12->save))
+		return false;
+
 	return nested_vmcb_check_controls(&vmcb12->control);
@@ -1234,9 +1256,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	/*
 	 * Validate host state saved from before VMRUN (see
 	 * nested_svm_check_permissions).
-	 * TODO: validate reserved bits for all saved state.
 	 */
-	if (!(save->cr0 & X86_CR0_PG))
+	if (!(save->cr0 & X86_CR0_PG) ||
+	    !(save->cr0 & X86_CR0_PE) ||
+	    (save->rflags & X86_EFLAGS_VM) ||
+	    !nested_vmcb_valid_sregs(svm, save))
 		goto out_free;
 
 	/*
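Usage note (a hypothetical sketch, not part of this patch): with these checks in place, KVM_SET_NESTED_STATE refuses a state blob whose saved host registers carry reserved or inconsistent bits with EINVAL instead of accepting it. A round-trip through KVM_GET_NESTED_STATE / KVM_SET_NESTED_STATE for an already-created SVM vCPU might look like the following; the helper name and error handling are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: fetch the nested state of an SVM vCPU and feed
 * it straight back.  vcpu_fd is assumed to be a vCPU fd on a host with
 * KVM_CAP_NESTED_STATE and nested SVM enabled.
 */
static int roundtrip_nested_state(int vcpu_fd)
{
        struct kvm_nested_state *state;
        int size = sizeof(*state) + KVM_STATE_NESTED_SVM_VMCB_SIZE;
        int ret;

        state = calloc(1, size);
        if (!state)
                return -ENOMEM;
        state->size = size;     /* tell KVM how large the buffer is */

        ret = ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state);
        if (ret < 0) {
                perror("KVM_GET_NESTED_STATE");
                goto out;
        }

        /*
         * Setting the unmodified blob back must succeed.  A blob whose
         * saved host state has, e.g., CR0[63:32] set or CR0.CD=0 with
         * CR0.NW=1 now fails with EINVAL because of the checks above.
         */
        ret = ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
        if (ret < 0)
                perror("KVM_SET_NESTED_STATE");
out:
        free(state);
        return ret;
}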