Commit 481c9ee8 authored by Oliver Upton, committed by Marc Zyngier

KVM: arm64: vgic-its: Get rid of the lpi_list_lock

The last genuine use case for the lpi_list_lock was the global LPI
translation cache, which has been removed in favor of a per-ITS xarray.
Remove a layer from the locking puzzle by getting rid of it.

vgic_add_lpi() still has a critical section that needs to protect
against the insertion of other LPIs; change it to take the LPI xarray's
xa_lock to retain this property.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-13-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent ec39bbfd
...@@ -53,7 +53,6 @@ void kvm_vgic_early_init(struct kvm *kvm) ...@@ -53,7 +53,6 @@ void kvm_vgic_early_init(struct kvm *kvm)
{ {
struct vgic_dist *dist = &kvm->arch.vgic; struct vgic_dist *dist = &kvm->arch.vgic;
raw_spin_lock_init(&dist->lpi_list_lock);
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ); xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
} }
......
...@@ -69,7 +69,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, ...@@ -69,7 +69,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->target_vcpu = vcpu; irq->target_vcpu = vcpu;
irq->group = 1; irq->group = 1;
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); xa_lock_irqsave(&dist->lpi_xa, flags);
/* /*
* There could be a race with another vgic_add_lpi(), so we need to * There could be a race with another vgic_add_lpi(), so we need to
...@@ -84,14 +84,14 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, ...@@ -84,14 +84,14 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
goto out_unlock; goto out_unlock;
} }
ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0)); ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
if (ret) { if (ret) {
xa_release(&dist->lpi_xa, intid); xa_release(&dist->lpi_xa, intid);
kfree(irq); kfree(irq);
} }
out_unlock: out_unlock:
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); xa_unlock_irqrestore(&dist->lpi_xa, flags);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
......
...@@ -29,7 +29,6 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { ...@@ -29,7 +29,6 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* its->cmd_lock (mutex) * its->cmd_lock (mutex)
* its->its_lock (mutex) * its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled * vgic_cpu->ap_list_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
* vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled * vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled * vgic_irq->irq_lock must be taken with IRQs disabled
* *
......
...@@ -280,9 +280,6 @@ struct vgic_dist { ...@@ -280,9 +280,6 @@ struct vgic_dist {
*/ */
u64 propbaser; u64 propbaser;
/* Protects the lpi_list. */
raw_spinlock_t lpi_list_lock;
#define LPI_XA_MARK_DEBUG_ITER XA_MARK_0 #define LPI_XA_MARK_DEBUG_ITER XA_MARK_0
struct xarray lpi_xa; struct xarray lpi_xa;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment