Commit c9a7953f authored by Jan Kiszka, committed by Paolo Bonzini

KVM: x86: Remove return code from enable_irq/nmi_window

It's no longer possible to enter enable_irq_window in guest mode when
L1 intercepts external interrupts and we are entering L2. This is now
caught in vcpu_enter_guest. So we can remove the check from the VMX
version of enable_irq_window, and with it the need to return an error
code from both enable_irq_window and enable_nmi_window.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 220c5672
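
Context for the change: the earlier patches in this series moved the nested-interception check out of the vendor callbacks and into the generic entry path. A rough sketch of the resulting ordering in vcpu_enter_guest, assuming the check_nested_events hook added earlier in the series (names and guards are from memory, not a verbatim quote of the kernel source):

	/* Sketch only: nested events are evaluated before deciding on
	 * windows. If L1 intercepts the pending interrupt, this forces
	 * an L2->L1 exit up front, so enable_irq_window() can no longer
	 * be reached in guest mode with external-interrupt exiting set.
	 */
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
		kvm_x86_ops->check_nested_events(vcpu, req_int_win);

	if (inject_pending_event(vcpu, req_int_win) != 0)
		req_immediate_exit = true;
	/* enable NMI/IRQ window open exits if needed */
	else if (vcpu->arch.nmi_pending)
		kvm_x86_ops->enable_nmi_window(vcpu);
	else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
		kvm_x86_ops->enable_irq_window(vcpu);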
arch/x86/include/asm/kvm_host.h
@@ -729,8 +729,8 @@ struct kvm_x86_ops {
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
-	int (*enable_nmi_window)(struct kvm_vcpu *vcpu);
-	int (*enable_irq_window)(struct kvm_vcpu *vcpu);
+	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
 	int (*vm_has_apicv)(struct kvm *kvm);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
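
Each vendor module fills these members in its kvm_x86_ops instance, so both implementations must be converted in the same patch. A minimal sketch of the wiring in svm.c (abbreviated; the ops table itself is not part of this diff):

	static struct kvm_x86_ops svm_x86_ops = {
		/* ... */
		.enable_nmi_window = enable_nmi_window,	/* now returns void */
		.enable_irq_window = enable_irq_window,	/* now returns void */
		/* ... */
	};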
arch/x86/kvm/svm.c
@@ -3650,7 +3650,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static int enable_irq_window(struct kvm_vcpu *vcpu)
+static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -3664,16 +3664,15 @@ static int enable_irq_window(struct kvm_vcpu *vcpu)
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
 	}
-	return 0;
 }
 
-static int enable_nmi_window(struct kvm_vcpu *vcpu)
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
 	    == HF_NMI_MASK)
-		return 0; /* IRET will cause a vm exit */
+		return; /* IRET will cause a vm exit */
 
 	/*
 	 * Something prevents NMI from been injected. Single step over possible
@@ -3682,7 +3681,6 @@ static int enable_nmi_window(struct kvm_vcpu *vcpu)
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 	update_db_bp_intercept(vcpu);
-	return 0;
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
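
Applying these hunks, the SVM function reads roughly as below (reconstructed; the elided comment lines are completed from the surrounding source). SVM has no dedicated NMI-window exit, so when something blocks NMI injection it arms single-stepping to regain control once the blocking condition clears; nothing in here can fail, hence the void return:

	static void enable_nmi_window(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
		    == HF_NMI_MASK)
			return; /* IRET will cause a vm exit */

		/*
		 * Something prevents NMI from been injected. Single step over
		 * possible problem (IRET or exception injection or interrupt
		 * shadow)
		 */
		svm->nmi_singlestep = true;
		svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
		update_db_bp_intercept(vcpu);
	}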
arch/x86/kvm/vmx.c
@@ -4514,39 +4514,28 @@ static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
 		PIN_BASED_NMI_EXITING;
 }
 
-static int enable_irq_window(struct kvm_vcpu *vcpu)
+static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
 
-	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-		/*
-		 * We get here if vmx_interrupt_allowed() said we can't
-		 * inject to L1 now because L2 must run. The caller will have
-		 * to make L2 exit right after entry, so we can inject to L1
-		 * more promptly.
-		 */
-		return -EBUSY;
-
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
-	return 0;
 }
 
-static int enable_nmi_window(struct kvm_vcpu *vcpu)
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
 
-	if (!cpu_has_virtual_nmis())
-		return enable_irq_window(vcpu);
-
-	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI)
-		return enable_irq_window(vcpu);
+	if (!cpu_has_virtual_nmis() ||
+	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+		enable_irq_window(vcpu);
+		return;
+	}
 
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
-	return 0;
 }
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
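
The VMX conversion also folds the two early returns into a single condition; since enable_irq_window is void now, the old "return enable_irq_window(vcpu);" shorthand becomes a call plus a bare return. The resulting function (reconstructed from the hunk above, with an explanatory comment added):

	static void enable_nmi_window(struct kvm_vcpu *vcpu)
	{
		u32 cpu_based_vm_exec_control;

		/* Without virtual NMIs, or while NMIs are blocked by an STI
		 * shadow, fall back to requesting an IRQ window instead. */
		if (!cpu_has_virtual_nmis() ||
		    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
			enable_irq_window(vcpu);
			return;
		}

		cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
	}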
arch/x86/kvm/x86.c
@@ -5976,11 +5976,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			req_immediate_exit = true;
 		/* enable NMI/IRQ window open exits if needed */
 		else if (vcpu->arch.nmi_pending)
-			req_immediate_exit =
-				kvm_x86_ops->enable_nmi_window(vcpu) != 0;
+			kvm_x86_ops->enable_nmi_window(vcpu);
 		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-			req_immediate_exit =
-				kvm_x86_ops->enable_irq_window(vcpu) != 0;
+			kvm_x86_ops->enable_irq_window(vcpu);
 
 	if (kvm_lapic_enabled(vcpu)) {
 		/*
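
Design note: with the nested case resolved before injection, neither callback has a failure mode left, so req_immediate_exit at this call site is driven purely by the injection path above it and the window callbacks become fire-and-forget, as the hunk shows.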