Commit a20ed54d authored by Yang Zhang, committed by Marcelo Tosatti

KVM: VMX: Add the deliver posted interrupt algorithm

Only deliver the posted interrupt when the target vcpu is running
and there is no previous interrupt pending in the PIR.
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent cf9e65b7
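
Before the diff itself, a minimal user-space sketch of the delivery protocol this commit implements may help. It is not kernel code: struct pi_desc below is a simplified stand-in for the 64-byte hardware posted-interrupt descriptor, C11 atomics stand in for the kernel's test_and_set_bit(), and the puts() calls stand in for the notification IPI and kvm_vcpu_kick().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified posted-interrupt descriptor: a 256-bit PIR (one bit per
 * interrupt vector) plus a control word whose bit 0 is ON, the
 * "outstanding notification" flag. */
struct pi_desc {
	_Atomic uint64_t pir[4];
	_Atomic uint64_t control;
};

static bool pi_test_and_set_pir(struct pi_desc *pi, int vector)
{
	uint64_t mask = UINT64_C(1) << (vector & 63);
	return atomic_fetch_or(&pi->pir[vector >> 6], mask) & mask;
}

static bool pi_test_and_set_on(struct pi_desc *pi)
{
	return atomic_fetch_or(&pi->control, 1) & 1;
}

/* Sender side, shaped like vmx_deliver_posted_interrupt() below.
 * (The kernel also queues KVM_REQ_EVENT at this point; omitted.) */
static void deliver(struct pi_desc *pi, int vector, bool vcpu_in_guest)
{
	if (pi_test_and_set_pir(pi, vector))
		return;		/* vector already pending: nothing more to do */
	if (!pi_test_and_set_on(pi) && vcpu_in_guest)
		puts("send notification IPI; hardware syncs PIR into vIRR");
	else
		puts("kick the vcpu; it drains PIR on the next vmentry");
}

int main(void)
{
	struct pi_desc pi = {0};
	deliver(&pi, 0x31, true);	/* first delivery raises the IPI */
	deliver(&pi, 0x31, true);	/* duplicate is absorbed by the PIR bit */
	return 0;
}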
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -701,6 +701,8 @@ struct kvm_x86_ops {
 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -318,6 +318,19 @@ static u8 count_vectors(void *bitmap)
 	return count;
 }
 
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+{
+	u32 i, pir_val;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	for (i = 0; i <= 7; i++) {
+		pir_val = xchg(&pir[i], 0);
+		if (pir_val)
+			*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = true;
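
The xchg() in kvm_apic_update_irr() above is what makes the drain safe without a lock: each 32-bit chunk of the PIR is snapshotted and cleared in a single atomic step, so a bit posted concurrently by another CPU is either seen in this pass or left in the PIR for the next one, never lost. A minimal sketch of the same loop, again with C11 atomics standing in for the kernel primitives and a plain array standing in for the APIC register page:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Receiver side, shaped like kvm_apic_update_irr(): drain the eight
 * 32-bit PIR chunks into a software IRR. */
static void sync_pir_to_irr(_Atomic uint32_t pir[8], uint32_t irr[8])
{
	for (int i = 0; i <= 7; i++) {
		uint32_t val = atomic_exchange(&pir[i], 0);	/* the xchg */
		if (val)
			irr[i] |= val;
	}
}

int main(void)
{
	_Atomic uint32_t pir[8] = {0};
	uint32_t irr[8] = {0};

	atomic_fetch_or(&pir[1], UINT32_C(1) << 17);	/* post vector 49 */
	sync_pir_to_irr(pir, irr);
	printf("irr[1] = %#x\n", (unsigned)irr[1]);	/* prints 0x20000 */
	return 0;
}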
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -54,6 +54,7 @@ u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3577,6 +3577,11 @@ static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
 	return;
 }
 
+static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4305,6 +4310,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.vm_has_apicv = svm_vm_has_apicv,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_isr_update = svm_hwapic_isr_update,
+	.sync_pir_to_irr = svm_sync_pir_to_irr,
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -375,6 +375,23 @@ struct pi_desc {
 	u32 rsvd[7];
 } __aligned(64);
 
+static bool pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+	return test_and_set_bit(POSTED_INTR_ON,
+			(unsigned long *)&pi_desc->control);
+}
+
+static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
+{
+	return test_and_clear_bit(POSTED_INTR_ON,
+			(unsigned long *)&pi_desc->control);
+}
+
+static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
 	unsigned long host_rsp;
@@ -639,6 +656,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
+static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2846,8 +2864,11 @@ static __init int hardware_setup(void)
 
 	if (enable_apicv)
 		kvm_x86_ops->update_cr8_intercept = NULL;
-	else
+	else {
 		kvm_x86_ops->hwapic_irr_update = NULL;
+		kvm_x86_ops->deliver_posted_interrupt = NULL;
+		kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+	}
 
 	if (nested)
 		nested_vmx_setup_ctls_msrs();
@@ -3908,6 +3929,45 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
 	return enable_apicv && irqchip_in_kernel(kvm);
 }
 
+/*
+ * Send interrupt to vcpu via posted interrupt way.
+ * 1. If target vcpu is running (non-root mode), send posted interrupt
+ *    notification to vcpu and hardware will sync PIR to vIRR atomically.
+ * 2. If target vcpu isn't running (root mode), kick it to pick up the
+ *    interrupt from PIR in next vmentry.
+ */
+static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int r;
+
+	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+		return;
+
+	r = pi_test_and_set_on(&vmx->pi_desc);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	if (!r && (vcpu->mode == IN_GUEST_MODE))
+		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+				POSTED_INTR_VECTOR);
+	else
+		kvm_vcpu_kick(vcpu);
+}
+
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!pi_test_and_clear_on(&vmx->pi_desc))
+		return;
+
+	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
+static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -7784,6 +7844,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
 	.hwapic_isr_update = vmx_hwapic_isr_update,
+	.sync_pir_to_irr = vmx_sync_pir_to_irr,
+	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
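
One ordering detail in vmx_sync_pir_to_irr() above deserves a note: ON is cleared before the PIR is read. A small sketch of why that order is the safe one (again user-space C with C11 atomics standing in for the kernel primitives, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of pi_test_and_clear_on(): atomically clear ON (bit 0) and
 * report whether it was set. */
static bool test_and_clear_on(_Atomic uint64_t *control)
{
	return atomic_fetch_and(control, ~UINT64_C(1)) & 1;
}

int main(void)
{
	_Atomic uint64_t control = 1;	/* a sender has set ON */

	/* Clearing ON *before* draining the PIR means a sender that posts
	 * a vector after our drain finds ON == 0, sets it again, and
	 * raises a fresh notification, so no late vector can be stranded
	 * in the PIR with nobody left to sync it. */
	if (test_and_clear_on(&control)) {
		/* ... drain PIR into the vIRR here, as the
		 * kvm_apic_update_irr() hunk in lapic.c does ... */
	}
	return (int)control;	/* 0: ON consumed exactly once */
}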
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1671,6 +1671,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 		smp_send_reschedule(cpu);
 	put_cpu();
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
 void kvm_resched(struct kvm_vcpu *vcpu)