Commit af061499 authored by Christoffer Dall, committed by Christoffer Dall

KVM: arm/arm64: vgic: Get rid of unnecessary process_maintenance operation

Since we always read back the LRs that we wrote to the guest and the
MISR and EISR registers simply provide a summary of the configuration of
the bits in the LRs, there is really no need to read back those status
registers and process them.  We might as well just signal the
notifyfd when folding the LR state and save some cycles in the process.

We now clear the underflow bit in the fold_lr_state functions, as we only
need to clear this bit if we had used all the LRs, so this is as good a
place as any to do that work.
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 90cac1f5
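
The per-LR check that replaces the EISR scan is self-contained: an LR signals an EOI maintenance interrupt exactly when its state bits are clear, its EOI bit is set, and it is not hardware-backed. Below is a minimal standalone sketch of the GICv2 variant of the new lr_signals_eoi_mi() helper; the bit values are the GICH_LR_* definitions from the kernel's GIC headers, but the harness around them is illustrative, not kernel code.

	#include <stdbool.h>
	#include <stdint.h>

	/* GICv2 list-register bits, as defined in the kernel's arm-gic.h */
	#define GICH_LR_STATE	(3u << 28)	/* pending | active */
	#define GICH_LR_EOI	(1u << 19)	/* request maintenance IRQ on EOI */
	#define GICH_LR_HW	(1u << 31)	/* LR backed by a hardware IRQ */

	/*
	 * The guest has EOI'ed the interrupt when no state bits remain,
	 * EOI signaling was requested, and the LR is not hardware-backed.
	 */
	static bool lr_signals_eoi_mi(uint32_t lr_val)
	{
		return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
		       !(lr_val & GICH_LR_HW);
	}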
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -22,59 +22,17 @@

 #include "vgic.h"

-/*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
-static unsigned long *u64_to_bitmask(u64 *val)
-{
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
-	*val = (*val >> 32) | (*val << 32);
-#endif
-	return (unsigned long *)val;
-}
-
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

-	if (cpuif->vgic_misr & GICH_MISR_EOI) {
-		u64 eisr = cpuif->vgic_eisr;
-		unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
-		int lr;
-
-		for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
-			u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
-
-			/* Only SPIs require notification */
-			if (vgic_valid_spi(vcpu->kvm, intid))
-				kvm_notify_acked_irq(vcpu->kvm, 0,
-						     intid - VGIC_NR_PRIVATE_IRQS);
-		}
-	}
-
-	/* check and disable underflow maintenance IRQ */
-	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
-
-	/*
-	 * In the next iterations of the vcpu loop, if we sync the
-	 * vgic state after flushing it, but before entering the guest
-	 * (this happens for pending signals and vmid rollovers), then
-	 * make sure we don't pick up any old maintenance interrupts
-	 * here.
-	 */
-	cpuif->vgic_eisr = 0;
-}
-
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
 	cpuif->vgic_hcr |= GICH_HCR_UIE;
 }

+static bool lr_signals_eoi_mi(u32 lr_val)
+{
+	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
+	       !(lr_val & GICH_LR_HW);
+}
+
 /*
@@ -89,11 +47,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
 	int lr;

+	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
 	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
 		u32 intid = val & GICH_LR_VIRTUALID;
 		struct vgic_irq *irq;

+		/* Notify fds when the guest EOI'ed a level-triggered SPI */
+		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+			kvm_notify_acked_irq(vcpu->kvm, 0,
+					     intid - VGIC_NR_PRIVATE_IRQS);
+
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

 		spin_lock(&irq->irq_lock);
...
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -21,50 +21,17 @@

 #include "vgic.h"

-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-	u32 model = vcpu->kvm->arch.vgic.vgic_model;
-
-	if (cpuif->vgic_misr & ICH_MISR_EOI) {
-		unsigned long eisr_bmap = cpuif->vgic_eisr;
-		int lr;
-
-		for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
-			u32 intid;
-			u64 val = cpuif->vgic_lr[lr];
-
-			if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
-				intid = val & ICH_LR_VIRTUAL_ID_MASK;
-			else
-				intid = val & GICH_LR_VIRTUALID;
-
-			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
-
-			/* Only SPIs require notification */
-			if (vgic_valid_spi(vcpu->kvm, intid))
-				kvm_notify_acked_irq(vcpu->kvm, 0,
-						     intid - VGIC_NR_PRIVATE_IRQS);
-		}
-
-		/*
-		 * In the next iterations of the vcpu loop, if we sync
-		 * the vgic state after flushing it, but before
-		 * entering the guest (this happens for pending
-		 * signals and vmid rollovers), then make sure we
-		 * don't pick up any old maintenance interrupts here.
-		 */
-		cpuif->vgic_eisr = 0;
-	}

-	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+	cpuif->vgic_hcr |= ICH_HCR_UIE;
 }

-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u64 lr_val)
 {
-	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-	cpuif->vgic_hcr |= ICH_HCR_UIE;
+	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
+	       !(lr_val & ICH_LR_HW);
 }

 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -73,6 +40,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	int lr;

+	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+
 	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
 		u32 intid;
@@ -82,6 +51,12 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 			intid = val & ICH_LR_VIRTUAL_ID_MASK;
 		else
 			intid = val & GICH_LR_VIRTUALID;
+
+		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
+		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+			kvm_notify_acked_irq(vcpu->kvm, 0,
+					     intid - VGIC_NR_PRIVATE_IRQS);
+
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 		if (!irq)	/* An LPI could have been unmapped. */
 			continue;
...
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -527,14 +527,6 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 	spin_unlock(&vgic_cpu->ap_list_lock);
 }

-static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
-{
-	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_process_maintenance(vcpu);
-	else
-		vgic_v3_process_maintenance(vcpu);
-}
-
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 {
 	if (kvm_vgic_global_state.type == VGIC_V2)
@@ -644,7 +636,6 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	if (unlikely(!vgic_initialized(vcpu->kvm)))
 		return;

-	vgic_process_maintenance_interrupt(vcpu);
 	vgic_fold_lr_state(vcpu);
 	vgic_prune_ap_list(vcpu);
...
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -112,7 +112,6 @@ void vgic_kick_vcpus(struct kvm *kvm);
 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
 		      phys_addr_t addr, phys_addr_t alignment);

-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
 void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
@@ -141,7 +140,6 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
 	kref_get(&irq->refcount);
 }

-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
...
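
With process_maintenance gone, the underflow maintenance interrupt follows a simple set/clear pairing: the flush path sets GICH_HCR_UIE (ICH_HCR_UIE on GICv3) when it runs out of LRs, and fold_lr_state() now disarms it unconditionally on sync. A sketch of that discipline, using a simplified stand-in for the cpu interface struct (the real code operates on struct vgic_v2_cpu_if / vgic_v3_cpu_if):

	#include <stdint.h>

	/* underflow interrupt enable bit, as in the kernel's arm-gic.h */
	#define GICH_HCR_UIE	(1u << 1)

	/* simplified stand-in for struct vgic_v2_cpu_if */
	struct cpu_if {
		uint32_t vgic_hcr;
	};

	/* flush side: request a maintenance IRQ once the LRs drain */
	static void set_underflow(struct cpu_if *cpuif)
	{
		cpuif->vgic_hcr |= GICH_HCR_UIE;
	}

	/*
	 * sync side: folding the LR state always clears the bit,
	 * so no separate process_maintenance step remains.
	 */
	static void fold_lr_state_enter(struct cpu_if *cpuif)
	{
		cpuif->vgic_hcr &= ~GICH_HCR_UIE;
	}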