Commit 730dca42 authored by Jan Kiszka, committed by Gleb Natapov

KVM: x86: Rework request for immediate exit

The VMX implementation of enable_irq_window raised
KVM_REQ_IMMEDIATE_EXIT after we checked it in vcpu_enter_guest. This
caused infinite loops on vmentry. Fix it by letting enable_irq_window
signal the need for an immediate exit via its return value and drop
KVM_REQ_IMMEDIATE_EXIT.

This issue only affects nested VMX scenarios.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 6614c7d0
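
In short, the hook's contract changes from a void callback that raised a vCPU request to a plain return value that the caller acts on. A minimal sketch of the new convention, summarizing the hunks below:

        /* enable_irq_window() now returns 0 when an IRQ window has been
         * requested, or -EBUSY (nested VMX) when the caller must force an
         * immediate exit so the interrupt can be injected into L1. */
        int (*enable_irq_window)(struct kvm_vcpu *vcpu);

        /* vcpu_enter_guest() turns a non-zero return value into a local
         * flag instead of consuming a vCPU request: */
        req_immediate_exit = kvm_x86_ops->enable_irq_window(vcpu) != 0;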
arch/x86/include/asm/kvm_host.h
@@ -694,7 +694,7 @@ struct kvm_x86_ops {
         bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
         void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
         void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
-        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+        int (*enable_irq_window)(struct kvm_vcpu *vcpu);
         void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
         int (*vm_has_apicv)(struct kvm *kvm);
         void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
arch/x86/kvm/svm.c
@@ -3632,7 +3632,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
         return ret;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -3646,6 +3646,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
                 svm_set_vintr(svm);
                 svm_inject_irq(svm, 0x0);
         }
+        return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
arch/x86/kvm/vmx.c
@@ -4398,22 +4398,23 @@ static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
                 PIN_BASED_NMI_EXITING;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
         u32 cpu_based_vm_exec_control;
 
-        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
                 /*
                  * We get here if vmx_interrupt_allowed() said we can't
-                 * inject to L1 now because L2 must run. Ask L2 to exit
-                 * right after entry, so we can inject to L1 more promptly.
+                 * inject to L1 now because L2 must run. The caller will have
+                 * to make L2 exit right after entry, so we can inject to L1
+                 * more promptly.
                  */
-                kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
-                return;
-        }
+                return -EBUSY;
+
         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+        return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
arch/x86/kvm/x86.c
@@ -5692,7 +5692,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         int r;
         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                 vcpu->run->request_interrupt_window;
-        bool req_immediate_exit = 0;
+        bool req_immediate_exit = false;
 
         if (vcpu->requests) {
                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5734,8 +5734,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                         record_steal_time(vcpu);
                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
                         process_nmi(vcpu);
-                req_immediate_exit =
-                        kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                 if (kvm_check_request(KVM_REQ_PMU, vcpu))
                         kvm_handle_pmu_event(vcpu);
                 if (kvm_check_request(KVM_REQ_PMI, vcpu))
@@ -5757,7 +5755,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 if (vcpu->arch.nmi_pending)
                         kvm_x86_ops->enable_nmi_window(vcpu);
                 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-                        kvm_x86_ops->enable_irq_window(vcpu);
+                        req_immediate_exit =
+                                kvm_x86_ops->enable_irq_window(vcpu) != 0;
 
                 if (kvm_lapic_enabled(vcpu)) {
                         /*
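
For context: the code that acts on req_immediate_exit later in vcpu_enter_guest() is not touched by this patch and therefore not shown above. In this kernel version the existing mechanism forces the exit by sending a reschedule IPI to the vCPU's own CPU while interrupts are disabled, so the pending IPI causes a VM exit right after entry (shown here for context only, not part of this diff):

        /* Further down in vcpu_enter_guest(), unchanged by this patch:
         * with interrupts already disabled, the self-IPI stays pending and
         * kicks the vCPU out of guest mode immediately after VM entry. */
        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu);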
include/linux/kvm_host.h
@@ -119,14 +119,13 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_APF_HALT          12
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
-#define KVM_REQ_IMMEDIATE_EXIT    15
-#define KVM_REQ_PMU               16
-#define KVM_REQ_PMI               17
-#define KVM_REQ_WATCHDOG          18
-#define KVM_REQ_MASTERCLOCK_UPDATE 19
-#define KVM_REQ_MCLOCK_INPROGRESS 20
-#define KVM_REQ_EPR_EXIT          21
-#define KVM_REQ_SCAN_IOAPIC       22
+#define KVM_REQ_PMU               15
+#define KVM_REQ_PMI               16
+#define KVM_REQ_WATCHDOG          17
+#define KVM_REQ_MASTERCLOCK_UPDATE 18
+#define KVM_REQ_MCLOCK_INPROGRESS 19
+#define KVM_REQ_EPR_EXIT          20
+#define KVM_REQ_SCAN_IOAPIC       21
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID             0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1