Commit 3842d135 authored by Avi Kivity

KVM: Check for pending events before attempting injection

Instead of blindly attempting to inject an event before each guest entry,
check for a possible event first in vcpu->requests.  Sites that can trigger
event injection are modified to set KVM_REQ_EVENT:

- interrupt, nmi window opening
- ppr updates
- i8259 output changes
- local apic irr changes
- rflags updates
- gif flag set
- event set on exit

This improves non-injecting entry performance, and sets the stage for
non-atomic injection.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent b0bc3ee2
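
For context, vcpu->requests is a per-vcpu bitmask of deferred work: kvm_make_request() sets a bit, and the entry path consumes it with kvm_check_request(). A paraphrased sketch of the two helpers as they look in include/linux/kvm_host.h around this commit (shown for orientation; not part of the diff below):

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/* Mark deferred work; callers pair this with kvm_vcpu_kick() so a
	 * vcpu already in guest mode exits and notices the bit. */
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	/* Test-and-clear: the caller consumes the request. */
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	}
	return false;
}

Because kvm_check_request() clears the bit, injection now runs only on entries where some site has actually raised KVM_REQ_EVENT; every other entry skips the injection work entirely.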
arch/x86/kvm/i8259.c
@@ -67,6 +67,7 @@ static void pic_unlock(struct kvm_pic *s)
 		if (!found)
 			return;
 
+		kvm_make_request(KVM_REQ_EVENT, found);
 		kvm_vcpu_kick(found);
 	}
 }
arch/x86/kvm/lapic.c
@@ -259,9 +259,10 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 
 static void apic_update_ppr(struct kvm_lapic *apic)
 {
-	u32 tpr, isrv, ppr;
+	u32 tpr, isrv, ppr, old_ppr;
 	int isr;
 
+	old_ppr = apic_get_reg(apic, APIC_PROCPRI);
 	tpr = apic_get_reg(apic, APIC_TASKPRI);
 	isr = apic_find_highest_isr(apic);
 	isrv = (isr != -1) ? isr : 0;
@@ -274,7 +275,10 @@ static void apic_update_ppr(struct kvm_lapic *apic)
 	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
 		   apic, ppr, isr, isrv);
 
-	apic_set_reg(apic, APIC_PROCPRI, ppr);
+	if (old_ppr != ppr) {
+		apic_set_reg(apic, APIC_PROCPRI, ppr);
+		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+	}
 }
 
 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
@@ -391,6 +395,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			break;
 		}
 
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_vcpu_kick(vcpu);
 		break;
 
@@ -416,6 +421,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 				       "INIT on a runnable vcpu %d\n",
 				       vcpu->vcpu_id);
 			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		} else {
 			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
@@ -430,6 +436,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			result = 1;
 			vcpu->arch.sipi_vector = vector;
 			vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		}
 		break;
@@ -475,6 +482,7 @@ static void apic_set_eoi(struct kvm_lapic *apic)
 		trigger_mode = IOAPIC_EDGE_TRIG;
 	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
 		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
@@ -1152,6 +1160,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
arch/x86/kvm/svm.c
@@ -2371,6 +2371,7 @@ static int stgi_interception(struct vcpu_svm *svm)
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
+	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
 	enable_gif(svm);
@@ -2763,6 +2764,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
 
+	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -3209,8 +3211,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	svm->int3_injected = 0;
 
-	if (svm->vcpu.arch.hflags & HF_IRET_MASK)
+	if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
 		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	}
 
 	svm->vcpu.arch.nmi_injected = false;
 	kvm_clear_exception_queue(&svm->vcpu);
@@ -3219,6 +3223,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
 		return;
 
+	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+
 	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
 	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
arch/x86/kvm/vmx.c
@@ -3327,6 +3327,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 
 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 {
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 1;
 }
 
@@ -3339,6 +3340,8 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	++vcpu->stat.irq_window_exits;
 
 	/*
@@ -3595,6 +3598,7 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu)
 	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 	++vcpu->stat.nmi_window_exits;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 1;
 }
@@ -3828,6 +3832,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	if (!idtv_info_valid)
 		return;
 
+	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
+
 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
arch/x86/kvm/x86.c
@@ -284,6 +284,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 	u32 prev_nr;
 	int class1, class2;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	if (!vcpu->arch.exception.pending) {
 	queue:
 		vcpu->arch.exception.pending = true;
@@ -356,6 +358,7 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu)
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.nmi_pending = 1;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -2418,6 +2421,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -ENXIO;
 
 	kvm_queue_interrupt(vcpu, irq->irq, false);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 0;
 }
@@ -2571,6 +2575,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
 		vcpu->arch.sipi_vector = events->sipi_vector;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return 0;
 }
@@ -4329,6 +4335,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
 	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
@@ -4998,6 +5005,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
+	bool req_event;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5045,8 +5053,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
+	req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
+
 	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
+		if (req_event)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 		atomic_set(&vcpu->guest_mode, 0);
 		smp_wmb();
 		local_irq_enable();
@@ -5055,6 +5067,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
+	if (req_event || req_int_win) {
 	inject_pending_event(vcpu);
 
 	/* enable NMI/IRQ window open exits if needed */
@@ -5067,6 +5080,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		update_cr8_intercept(vcpu);
 		kvm_lapic_sync_to_vapic(vcpu);
 	}
+	}
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -5305,6 +5319,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vcpu->arch.exception.pending = false;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return 0;
 }
@@ -5368,6 +5384,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
 	vcpu->arch.mp_state = mp_state->mp_state;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
@@ -5389,6 +5406,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
 	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5459,6 +5477,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	    !is_protmode(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return 0;
 }
@@ -5691,6 +5711,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.dr6 = DR6_FIXED_1;
 	vcpu->arch.dr7 = DR7_FIXED_1;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
@@ -6001,6 +6023,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
 		rflags |= X86_EFLAGS_TF;
 	kvm_x86_ops->set_rflags(vcpu, rflags);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
include/linux/kvm_host.h
@@ -39,6 +39,7 @@
 #define KVM_REQ_KVMCLOCK_UPDATE    8
 #define KVM_REQ_KICK               9
 #define KVM_REQ_DEACTIVATE_FPU    10
+#define KVM_REQ_EVENT             11
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
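
The subtle part of the vcpu_enter_guest() hunks above is that kvm_check_request() clears KVM_REQ_EVENT, so when the entry is aborted (another request, a signal, or a reschedule), the bit must be put back or a queued event would be silently dropped. A simplified, self-contained model of that consume-and-re-raise pattern in plain C; names such as enter_guest_once and the entry_canceled flag are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#define KVM_REQ_EVENT 11

struct vcpu { unsigned long requests; };

/* Model of kvm_make_request(): record deferred work in the bitmask. */
static void make_request(int req, struct vcpu *v)
{
	v->requests |= 1UL << req;
}

/* Model of kvm_check_request(): test and consume the bit. */
static bool check_request(int req, struct vcpu *v)
{
	if (v->requests & (1UL << req)) {
		v->requests &= ~(1UL << req);
		return true;
	}
	return false;
}

/* One iteration of the entry loop; entry_canceled stands in for the
 * real "signal pending / need_resched / other request" abort check. */
static void enter_guest_once(struct vcpu *v, bool entry_canceled, bool req_int_win)
{
	bool req_event = check_request(KVM_REQ_EVENT, v);

	if (entry_canceled) {
		/* Re-raise so the event is reconsidered on the next entry. */
		if (req_event)
			make_request(KVM_REQ_EVENT, v);
		return;
	}

	if (req_event || req_int_win)
		printf("inject_pending_event()\n");	/* injection work */
	else
		printf("fast path: no injection attempt\n");
}

int main(void)
{
	struct vcpu v = { 0 };

	make_request(KVM_REQ_EVENT, &v);	/* e.g. the i8259 raised its output */
	enter_guest_once(&v, true, false);	/* canceled entry: request survives */
	enter_guest_once(&v, false, false);	/* next entry performs injection */
	enter_guest_once(&v, false, false);	/* fast path: nothing pending */
	return 0;
}

This is the performance win the commit message describes: entries with no pending event take the fast path, and the injection logic runs only when one of the listed sites has raised KVM_REQ_EVENT.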