Commit dc87275f authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move (most) SMM hflags modifications into kvm_smm_changed()

Move the core of SMM hflags modifications into kvm_smm_changed() and use
kvm_smm_changed() in enter_smm().  Clear HF_SMM_INSIDE_NMI_MASK for
leaving SMM but do not set it for entering SMM.  If the vCPU is executing
outside of SMM, the flag should unequivocally be cleared, e.g. this
technically fixes a benign bug where the flag could be left set after
KVM_SET_VCPU_EVENTS, but the reverse is not true as NMI blocking depends
on pre-SMM state or userspace input.

Note, this adds an extra kvm_mmu_reset_context() to enter_smm().  The
extra/early reset isn't strictly necessary, and in a way can never be
necessary since the vCPU/MMU context is in a half-baked state until the
final context reset at the end of the function.  But, enter_smm() is not
a hot path, and exploding on an invalid root_hpa is probably better than
having a stale SMM flag in the MMU role; it's at least no worse.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609185619.992058-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fa75e08b
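
For reference, a sketch of how the consolidated helper reads after this patch. It is assembled from the hunks below plus the commit message; the rest of the exit-SMM branch and the trailing kvm_mmu_reset_context() call are not visible in the truncated hunk and are inferred from the note above about the extra/early MMU context reset.

static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
	if (entering_smm) {
		vcpu->arch.hflags |= HF_SMM_MASK;
	} else {
		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);

		/* This is a good place to trace that we are exiting SMM. */
		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);

		/* ... rest of the exit-SMM handling, elided in the hunk below ... */
	}

	/*
	 * Not shown in the truncated hunk; inferred from the commit message's
	 * note about the extra/early reset that enter_smm() now picks up by
	 * calling this helper.
	 */
	kvm_mmu_reset_context(vcpu);
}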
@@ -4532,7 +4532,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
@@ -4592,13 +4592,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
-			if (events->smi.smm)
-				vcpu->arch.hflags |= HF_SMM_MASK;
-			else
-				vcpu->arch.hflags &= ~HF_SMM_MASK;
-			kvm_smm_changed(vcpu);
-		}
+		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
+			kvm_smm_changed(vcpu, events->smi.smm);
 
 		vcpu->arch.smi_pending = events->smi.pending;
 
@@ -7218,8 +7213,7 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 
-	vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
-	kvm_smm_changed(vcpu);
+	kvm_smm_changed(vcpu, false);
 }
 
 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -7548,9 +7542,13 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu)
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 {
-	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
+	if (entering_smm) {
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	} else {
+		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+
 		/* This is a good place to trace that we are exiting SMM. */
 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
 
@@ -9022,7 +9020,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	 */
 	static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
 
-	vcpu->arch.hflags |= HF_SMM_MASK;
+	kvm_smm_changed(vcpu, true);
 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
 	if (static_call(kvm_x86_get_nmi_mask)(vcpu))