Commit 4444dfe4, authored by Tom Lendacky, committed by Paolo Bonzini

KVM: SVM: Add NMI support for an SEV-ES guest

The GHCB specification defines how NMIs are to be handled for an SEV-ES
guest. To detect the completion of an NMI the hypervisor must not
intercept the IRET instruction (because a #VC while running the NMI will
issue an IRET) and, instead, must receive an NMI Complete exit event from
the guest.

Update the KVM support for detecting the completion of NMIs in the guest
to follow the GHCB specification. When an SEV-ES guest is active, the
IRET instruction will no longer be intercepted. Now, when the NMI Complete
exit event is received, the iret_interception() function will be called
to simulate the completion of the NMI.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <5ea3dd69b8d4396cefdc9048ebc1ab7caa70a847.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ed02b213
...@@ -1449,6 +1449,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) ...@@ -1449,6 +1449,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
if (!ghcb_sw_scratch_is_valid(ghcb)) if (!ghcb_sw_scratch_is_valid(ghcb))
goto vmgexit_err; goto vmgexit_err;
break; break;
case SVM_VMGEXIT_NMI_COMPLETE:
case SVM_VMGEXIT_UNSUPPORTED_EVENT: case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break; break;
default: default:
...@@ -1770,6 +1771,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm) ...@@ -1770,6 +1771,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
control->exit_info_2, control->exit_info_2,
svm->ghcb_sa); svm->ghcb_sa);
break; break;
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
break;
case SVM_VMGEXIT_UNSUPPORTED_EVENT: case SVM_VMGEXIT_UNSUPPORTED_EVENT:
vcpu_unimpl(&svm->vcpu, vcpu_unimpl(&svm->vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
......
...@@ -2319,9 +2319,11 @@ static int cpuid_interception(struct vcpu_svm *svm) ...@@ -2319,9 +2319,11 @@ static int cpuid_interception(struct vcpu_svm *svm)
static int iret_interception(struct vcpu_svm *svm) static int iret_interception(struct vcpu_svm *svm)
{ {
++svm->vcpu.stat.nmi_window_exits; ++svm->vcpu.stat.nmi_window_exits;
svm_clr_intercept(svm, INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK; svm->vcpu.arch.hflags |= HF_IRET_MASK;
if (!sev_es_guest(svm->vcpu.kvm)) {
svm_clr_intercept(svm, INTERCEPT_IRET);
svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
}
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
return 1; return 1;
} }
...@@ -3302,6 +3304,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) ...@@ -3302,6 +3304,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK; vcpu->arch.hflags |= HF_NMI_MASK;
if (!sev_es_guest(svm->vcpu.kvm))
svm_set_intercept(svm, INTERCEPT_IRET); svm_set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections; ++vcpu->stat.nmi_injections;
} }
...@@ -3386,9 +3389,11 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) ...@@ -3386,9 +3389,11 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
if (masked) { if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK; svm->vcpu.arch.hflags |= HF_NMI_MASK;
if (!sev_es_guest(svm->vcpu.kvm))
svm_set_intercept(svm, INTERCEPT_IRET); svm_set_intercept(svm, INTERCEPT_IRET);
} else { } else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK; svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
if (!sev_es_guest(svm->vcpu.kvm))
svm_clr_intercept(svm, INTERCEPT_IRET); svm_clr_intercept(svm, INTERCEPT_IRET);
} }
} }
...@@ -3567,8 +3572,9 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) ...@@ -3567,8 +3572,9 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
* If we've made progress since setting HF_IRET_MASK, we've * If we've made progress since setting HF_IRET_MASK, we've
* executed an IRET and can allow NMI injection. * executed an IRET and can allow NMI injection.
*/ */
if ((svm->vcpu.arch.hflags & HF_IRET_MASK) if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
&& kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { (sev_es_guest(svm->vcpu.kvm) ||
kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.