Commit ec2cff6c authored by Marc Zyngier

Merge branch kvm-arm64/vgic-invlpir into kvmarm-master/next

* kvm-arm64/vgic-invlpir:
  : .
  : Implement MMIO-based LPI invalidation for vGICv3.
  : .
  KVM: arm64: vgic-v3: Advertise GICR_CTLR.{IR, CES} as a new GICD_IIDR revision
  KVM: arm64: vgic-v3: Implement MMIO-based LPI invalidation
  KVM: arm64: vgic-v3: Expose GICR_CTLR.RWP when disabling LPIs
  irqchip/gic-v3: Exposes bit values for GICR_CTLR.{IR, CES}
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 3b8e21e3 49a1a2c7
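
For context before the diff: this series teaches the vGICv3 redistributors the GICv4.1-style MMIO invalidation registers (GICR_INVLPIR, GICR_INVALLR, GICR_SYNCR), so a guest can refresh an LPI's configuration, or that of all LPIs targeting a redistributor, with plain MMIO writes instead of ITS INV/INVALL commands. A minimal, hypothetical guest-side sketch of the new flow (offsets per the GICv3 architecture spec; "rd_base", the helper name and the includes are illustrative, not part of this patch):

	#include <linux/io.h>

	#define MY_GICR_INVLPIR		0x00a0	/* write: INTID to invalidate */
	#define MY_GICR_SYNCR		0x00c0	/* read: bit 0 = Busy */

	static void rdist_inv_lpi(void __iomem *rd_base, u32 intid)
	{
		/* Kick the invalidation of a single physical LPI */
		writeq_relaxed(intid, rd_base + MY_GICR_INVLPIR);

		/* Poll GICR_SYNCR.Busy until the redistributor is done */
		while (readl_relaxed(rd_base + MY_GICR_SYNCR) & 1)
			cpu_relax();
	}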
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -319,7 +319,12 @@ int vgic_init(struct kvm *kvm)
 	vgic_debug_init(kvm);
 
-	dist->implementation_rev = 2;
+	/*
+	 * If userspace didn't set the GIC implementation revision,
+	 * default to the latest and greatest. You know you want it.
+	 */
+	if (!dist->implementation_rev)
+		dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST;
 	dist->initialized = true;
 
 out:
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -683,7 +683,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 	if (!vcpu)
 		return E_ITS_INT_UNMAPPED_INTERRUPT;
 
-	if (!vcpu->arch.vgic_cpu.lpis_enabled)
+	if (!vgic_lpis_enabled(vcpu))
 		return -EBUSY;
 
 	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
@@ -1272,6 +1272,11 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
 	return 0;
 }
 
+int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
+{
+	return update_lpi_config(kvm, irq, NULL, true);
+}
+
 /*
  * The INV command syncs the configuration bits from the memory table.
  * Must be called with the its_lock mutex held.
@@ -1288,7 +1293,41 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
 	if (!ite)
 		return E_ITS_INV_UNMAPPED_INTERRUPT;
 
-	return update_lpi_config(kvm, ite->irq, NULL, true);
+	return vgic_its_inv_lpi(kvm, ite->irq);
+}
+
+/**
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
+ *
+ * Contrary to the INVALL command, this targets a RD instead of a
+ * collection, and we don't need to hold the its_lock, since no ITS is
+ * involved here.
+ */
+int vgic_its_invall(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int irq_count, i = 0;
+	u32 *intids;
+
+	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
+	if (irq_count < 0)
+		return irq_count;
+
+	for (i = 0; i < irq_count; i++) {
+		struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
+
+		if (!irq)
+			continue;
+		update_lpi_config(kvm, irq, vcpu, false);
+		vgic_put_irq(kvm, irq);
+	}
+
+	kfree(intids);
+
+	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
+	return 0;
 }
 
 /*
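Both vgic_its_inv_lpi() and the per-LPI loop in vgic_its_invall() funnel into update_lpi_config(), which re-reads the LPI's one-byte entry from the guest's configuration table. Roughly, and only as a sketch of the idea rather than the kernel's actual helper (field masks follow the GICv3 LPI configuration table layout; locking and vPE forwarding are elided):

	static int sketch_reload_lpi_config(struct kvm *kvm, struct vgic_irq *irq)
	{
		/* PROPBASER bits [51:12] hold the table's physical address */
		gpa_t table = kvm->arch.vgic.propbaser & GENMASK_ULL(51, 12);
		u8 prop;
		int ret;

		/* One byte per LPI; LPI INTIDs start at 8192 */
		ret = kvm_read_guest(kvm, table + irq->intid - 8192, &prop, 1);
		if (ret)
			return ret;

		irq->priority = prop & 0xfc;	/* bits [7:2]: priority */
		irq->enabled  = prop & 0x01;	/* bit 0: enable */
		return 0;
	}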
@@ -1305,32 +1344,13 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 	u32 coll_id = its_cmd_get_collection(its_cmd);
 	struct its_collection *collection;
 	struct kvm_vcpu *vcpu;
-	struct vgic_irq *irq;
-	u32 *intids;
-	int irq_count, i;
 
 	collection = find_collection(its, coll_id);
 	if (!its_is_collection_mapped(collection))
 		return E_ITS_INVALL_UNMAPPED_COLLECTION;
 
 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
-
-	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
-	if (irq_count < 0)
-		return irq_count;
-
-	for (i = 0; i < irq_count; i++) {
-		irq = vgic_get_irq(kvm, NULL, intids[i]);
-		if (!irq)
-			continue;
-		update_lpi_config(kvm, irq, vcpu, false);
-		vgic_put_irq(kvm, irq);
-	}
-
-	kfree(intids);
-
-	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
-		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+	vgic_its_invall(vcpu);
 
 	return 0;
 }
--- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
@@ -73,9 +73,13 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
 					   gpa_t addr, unsigned int len,
 					   unsigned long val)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	u32 reg;
+
 	switch (addr & 0x0c) {
 	case GIC_DIST_IIDR:
-		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
+		reg = vgic_mmio_read_v2_misc(vcpu, addr, len);
+		if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
 			return -EINVAL;
 
 		/*
@@ -87,8 +91,16 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
 		 * migration from old kernels to new kernels with legacy
 		 * userspace.
 		 */
-		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
-		return 0;
+		reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+		switch (reg) {
+		case KVM_VGIC_IMP_REV_2:
+		case KVM_VGIC_IMP_REV_3:
+			vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
+			dist->implementation_rev = reg;
+			return 0;
+		default:
+			return -EINVAL;
+		}
 	}
 
 	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -155,13 +155,27 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
 					   unsigned long val)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	u32 reg;
 
 	switch (addr & 0x0c) {
 	case GICD_TYPER2:
-	case GICD_IIDR:
 		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
 			return -EINVAL;
 		return 0;
+	case GICD_IIDR:
+		reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
+		if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
+			return -EINVAL;
+
+		reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+		switch (reg) {
+		case KVM_VGIC_IMP_REV_2:
+		case KVM_VGIC_IMP_REV_3:
+			dist->implementation_rev = reg;
+			return 0;
+		default:
+			return -EINVAL;
+		}
 	case GICD_CTLR:
 		/* Not a GICv4.1? No HW SGIs */
 		if (!kvm_vgic_global_state.has_gicv4_1)
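With the revision field of GICD_IIDR now writable from userspace, a VMM can pin the implementation revision on the destination before restoring the rest of the GIC state, so a migration from an older kernel keeps the pre-IR/CES redistributor behaviour. A hedged userspace sketch, assuming the usual vGICv3 device-attribute encoding (MPIDR in attr[63:32], byte offset in attr[31:0], 32-bit accesses); the helper name and offset macro are illustrative:

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	#define GICD_IIDR_OFFSET	0x0008U

	/*
	 * vgic_fd: fd returned by KVM_CREATE_DEVICE for the vGICv3.
	 * mpidr:   MPIDR of a valid vCPU (the DIST_REGS encoding wants one).
	 */
	static int vgic_iidr_access(int vgic_fd, uint64_t mpidr,
				    uint32_t *iidr, int set)
	{
		struct kvm_device_attr attr = {
			.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
			.attr  = (mpidr << 32) | GICD_IIDR_OFFSET,
			.addr  = (uint64_t)(uintptr_t)iidr,
		};

		return ioctl(vgic_fd, set ? KVM_SET_DEVICE_ATTR
					  : KVM_GET_DEVICE_ATTR, &attr);
	}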
@@ -221,34 +235,58 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
 	vgic_put_irq(vcpu->kvm, irq);
 }
 
+bool vgic_lpis_enabled(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+	return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
+}
+
 static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
 					     gpa_t addr, unsigned int len)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	unsigned long val;
 
-	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
+	val = atomic_read(&vgic_cpu->ctlr);
+	if (vgic_get_implementation_rev(vcpu) >= KVM_VGIC_IMP_REV_3)
+		val |= GICR_CTLR_IR | GICR_CTLR_CES;
+
+	return val;
 }
 
 static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
 				     gpa_t addr, unsigned int len,
 				     unsigned long val)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	bool was_enabled = vgic_cpu->lpis_enabled;
+	u32 ctlr;
 
 	if (!vgic_has_its(vcpu->kvm))
 		return;
 
-	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
+	if (!(val & GICR_CTLR_ENABLE_LPIS)) {
+		/*
+		 * Don't disable if RWP is set, as there is already an
+		 * ongoing disable. Funky guest...
+		 */
+		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
+					      GICR_CTLR_ENABLE_LPIS,
+					      GICR_CTLR_RWP);
+		if (ctlr != GICR_CTLR_ENABLE_LPIS)
+			return;
 
-	if (was_enabled && !vgic_cpu->lpis_enabled) {
 		vgic_flush_pending_lpis(vcpu);
 		vgic_its_invalidate_cache(vcpu->kvm);
-	}
+		atomic_set_release(&vgic_cpu->ctlr, 0);
+	} else {
+		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
+					      GICR_CTLR_ENABLE_LPIS);
+		if (ctlr != 0)
+			return;
 
-	if (!was_enabled && vgic_cpu->lpis_enabled)
 		vgic_enable_lpis(vcpu);
+	}
 }
 
 static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
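The emulated GICR_CTLR is now a tiny atomic state machine: 0 (LPIs disabled), then GICR_CTLR_ENABLE_LPIS (enabled), then GICR_CTLR_RWP (disable in flight, guest reads RWP=1), then back to 0, with cmpxchg guarding each transition so a racing second write is simply dropped. A well-behaved guest mirrors this by polling RWP after clearing EnableLPIs; a hypothetical guest-side sketch (GICR_CTLR sits at offset 0 of the RD frame; the helper name is illustrative):

	/* Guest side: clear EnableLPIs, then wait for RWP to drop */
	static void rdist_disable_lpis(void __iomem *rd_base)
	{
		u32 ctlr = readl_relaxed(rd_base);	/* GICR_CTLR */

		writel_relaxed(ctlr & ~GICR_CTLR_ENABLE_LPIS, rd_base);

		while (readl_relaxed(rd_base) & GICR_CTLR_RWP)
			cpu_relax();
	}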
@@ -478,11 +516,10 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
 				     unsigned long val)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	u64 old_propbaser, propbaser;
 
 	/* Storing a value with LPIs already enabled is undefined */
-	if (vgic_cpu->lpis_enabled)
+	if (vgic_lpis_enabled(vcpu))
 		return;
 
 	do {
@@ -513,7 +550,7 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
 	u64 old_pendbaser, pendbaser;
 
 	/* Storing a value with LPIs already enabled is undefined */
-	if (vgic_cpu->lpis_enabled)
+	if (vgic_lpis_enabled(vcpu))
 		return;
 
 	do {
@@ -525,6 +562,63 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
 		       pendbaser) != old_pendbaser);
 }
 
+static unsigned long vgic_mmio_read_sync(struct kvm_vcpu *vcpu,
+					 gpa_t addr, unsigned int len)
+{
+	return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
+}
+
+static void vgic_set_rdist_busy(struct kvm_vcpu *vcpu, bool busy)
+{
+	if (busy) {
+		atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
+		smp_mb__after_atomic();
+	} else {
+		smp_mb__before_atomic();
+		atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
+	}
+}
+
+static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
+				   gpa_t addr, unsigned int len,
+				   unsigned long val)
+{
+	struct vgic_irq *irq;
+
+	/*
+	 * If the guest wrote only to the upper 32bit part of the
+	 * register, drop the write on the floor, as it is only for
+	 * vPEs (which we don't support for obvious reasons).
+	 *
+	 * Also discard the access if LPIs are not enabled.
+	 */
+	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
+		return;
+
+	vgic_set_rdist_busy(vcpu, true);
+
+	irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val));
+	if (irq) {
+		vgic_its_inv_lpi(vcpu->kvm, irq);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	vgic_set_rdist_busy(vcpu, false);
+}
+
+static void vgic_mmio_write_invall(struct kvm_vcpu *vcpu,
+				   gpa_t addr, unsigned int len,
+				   unsigned long val)
+{
+	/* See vgic_mmio_write_invlpi() for the early return rationale */
+	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
+		return;
+
+	vgic_set_rdist_busy(vcpu, true);
+	vgic_its_invall(vcpu);
+	vgic_set_rdist_busy(vcpu, false);
+}
+
 /*
  * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
  * redistributors, while SPIs are covered by registers in the distributor
@@ -630,6 +724,15 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
 		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
+		vgic_mmio_read_raz, vgic_mmio_write_invlpi, 8,
+		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
+		vgic_mmio_read_raz, vgic_mmio_write_invall, 8,
+		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
+		vgic_mmio_read_sync, vgic_mmio_write_wi, 4,
+		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
 		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
 		VGIC_ACCESS_32bit),
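For orientation, the three new regions land at the architected offsets within each redistributor's RD_base frame; the values below are taken from the GICv3 architecture spec and should match the existing GICR_* constants in include/linux/irqchip/arm-gic-v3.h:

	#define GICR_INVLPIR	0x00a0	/* 64-bit: invalidate one LPI */
	#define GICR_INVALLR	0x00b0	/* 64-bit: invalidate all LPIs */
	#define GICR_SYNCR	0x00c0	/* 32-bit: bit 0 = Busy */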
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -98,6 +98,11 @@
 #define DEBUG_SPINLOCK_BUG_ON(p)
 #endif
 
+static inline u32 vgic_get_implementation_rev(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.vgic.implementation_rev;
+}
+
 /* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
@@ -308,6 +313,7 @@ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
 		(base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
 }
 
+bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
 int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq);
@@ -317,6 +323,10 @@ void vgic_lpi_translation_cache_init(struct kvm *kvm);
 void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
 void vgic_its_invalidate_cache(struct kvm *kvm);
 
+/* GICv4.1 MMIO interface */
+int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
+int vgic_its_invall(struct kvm_vcpu *vcpu);
+
 bool vgic_supports_direct_msis(struct kvm *kvm);
 int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -231,6 +231,9 @@ struct vgic_dist {
 
 	/* Implementation revision as reported in the GICD_IIDR */
 	u32 implementation_rev;
+#define KVM_VGIC_IMP_REV_2	2 /* GICv2 restorable groups */
+#define KVM_VGIC_IMP_REV_3	3 /* GICv3 GICR_CTLR.{IR,CES,RWP} */
+#define KVM_VGIC_IMP_REV_LATEST	KVM_VGIC_IMP_REV_3
 
 	/* Userspace can write to GICv2 IGROUPR */
 	bool v2_groups_user_writable;
@@ -344,11 +347,12 @@ struct vgic_cpu {
 	struct vgic_io_device rd_iodev;
 	struct vgic_redist_region *rdreg;
 	u32 rdreg_index;
+	atomic_t syncr_busy;
 
 	/* Contains the attributes and gpa of the LPI pending tables. */
 	u64 pendbaser;
-
-	bool lpis_enabled;
+	/* GICR_CTLR.{ENABLE_LPIS,RWP} */
+	atomic_t ctlr;
 
 	/* Cache guest priority bits */
 	u32 num_pri_bits;
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -127,6 +127,8 @@
 #define GICR_PIDR2			GICD_PIDR2
 
 #define GICR_CTLR_ENABLE_LPIS		(1UL << 0)
+#define GICR_CTLR_CES			(1UL << 1)
+#define GICR_CTLR_IR			(1UL << 2)
 #define GICR_CTLR_RWP			(1UL << 3)
 
 #define GICR_TYPER_CPU_NUMBER(r)	(((r) >> 8) & 0xffff)
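For reference, the full GICR_CTLR bit assignments as the architecture defines them (of these, KVM only lets the guest write EnableLPIs; CES, IR and RWP read back as status):

	/*
	 * GICR_CTLR bit layout, per the GICv3 architecture spec:
	 *   bit 0   EnableLPIs (RW)
	 *   bit 1   CES  - Clear Enable Supported (RO)
	 *   bit 2   IR   - GICR_INVLPIR/INVALLR/SYNCR implemented (RO)
	 *   bit 3   RWP  - Register Write Pending (RO)
	 *   bit 31  UWP  - Upstream Write Pending (RO)
	 * (bits [26:24] hold the DPG* fields, untouched by this series)
	 */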