Commit 720f73b7 authored by Oliver Upton, committed by Marc Zyngier

KVM: arm64: vgic-its: Walk LPI xarray in its_sync_lpi_pending_table()

The new LPI xarray makes it possible to walk the VM's LPIs without
holding a lock, meaning that vgic_copy_lpi_list() is no longer
necessary. Prepare for the deletion by walking the LPI xarray directly
in its_sync_lpi_pending_table().
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-3-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent ea54dd37
@@ -446,23 +446,18 @@ static u32 max_lpis_propbaser(u64 propbaser)
 static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 {
 	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long intid, flags;
 	struct vgic_irq *irq;
 	int last_byte_offset = -1;
 	int ret = 0;
-	u32 *intids;
-	int nr_irqs, i;
-	unsigned long flags;
 	u8 pendmask;
 
-	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
-	if (nr_irqs < 0)
-		return nr_irqs;
-
-	for (i = 0; i < nr_irqs; i++) {
+	xa_for_each(&dist->lpi_xa, intid, irq) {
 		int byte_offset, bit_nr;
 
-		byte_offset = intids[i] / BITS_PER_BYTE;
-		bit_nr = intids[i] % BITS_PER_BYTE;
+		byte_offset = intid / BITS_PER_BYTE;
+		bit_nr = intid % BITS_PER_BYTE;
 
 		/*
 		 * For contiguously allocated LPIs chances are we just read
@@ -472,25 +467,23 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 			ret = kvm_read_guest_lock(vcpu->kvm,
 						  pendbase + byte_offset,
 						  &pendmask, 1);
-			if (ret) {
-				kfree(intids);
+			if (ret)
 				return ret;
-			}
+
 			last_byte_offset = byte_offset;
 		}
 
-		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+		irq = vgic_get_irq(vcpu->kvm, NULL, intid);
 		if (!irq)
 			continue;
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		irq->pending_latch = pendmask & (1U << bit_nr);
+		if (irq->target_vcpu == vcpu)
+			irq->pending_latch = pendmask & (1U << bit_nr);
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
-	kfree(intids);
-
 	return ret;
 }
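Note on the pattern used above: rather than snapshotting the LPI INTIDs into a temporary array via vgic_copy_lpi_list(), the patched function iterates dist->lpi_xa directly with xa_for_each(). The sketch below is a hypothetical, standalone helper illustrating that idiom only; it is not part of the patch, and the helper name is invented for illustration.

#include <linux/xarray.h>

/*
 * Hypothetical helper, for illustration only: visit every (index, entry)
 * pair currently stored in an xarray. xa_for_each() yields occupied slots
 * only, so a sparse set of keys (such as LPI INTIDs) is walked without
 * scanning holes and without building a temporary list first.
 */
static void walk_xarray_entries(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry) {
		/* 'index' is the key; 'entry' is the pointer stored at it */
	}
}

In the patched function, the xarray is &dist->lpi_xa, each entry is a struct vgic_irq, and the per-vCPU filtering that vgic_copy_lpi_list() previously performed is handled inline by the irq->target_vcpu == vcpu check.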