Commit 2eeb321f authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-arm-for-v4.8-rc3' of...

Merge tag 'kvm-arm-for-v4.8-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/ARM Fixes for v4.8-rc3

This tag contains the following fixes on top of v4.8-rc1:
 - ITS init issues
 - ITS error handling issues
 - ITS IRQ leakage fix
 - Plug a couple of ITS race conditions
 - An erratum workaround for timers
 - Some removal of misleading use of errors and comments
 - A fix for GICv3 on 32-bit guests
parents c95ba92a f7f6f2d9
...@@ -53,6 +53,7 @@ stable kernels. ...@@ -53,6 +53,7 @@ stable kernels.
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 | | ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
| ARM | Cortex-A57 | #852523 | N/A | | ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 | | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | MMU-500 | #841119,#826419 | N/A | | ARM | MMU-500 | #841119,#826419 | N/A |
| | | | | | | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
......
...@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
smp_rmb(); smp_rmb();
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
if (is_error_pfn(pfn)) if (is_error_noslot_pfn(pfn))
return -EFAULT; return -EFAULT;
if (kvm_is_device_pfn(pfn)) { if (kvm_is_device_pfn(pfn)) {
......
...@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) ...@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
/* /*
* We must restore the 32-bit state before the sysregs, thanks * We must restore the 32-bit state before the sysregs, thanks
* to Cortex-A57 erratum #852523. * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/ */
__sysreg32_restore_state(vcpu); __sysreg32_restore_state(vcpu);
__sysreg_restore_guest_state(guest_ctxt); __sysreg_restore_guest_state(guest_ctxt);
......
...@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, ...@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* Architected system registers. * Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
* *
* We could trap ID_DFR0 and tell the guest we don't support performance
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
* NAKed, so it will read the PMCR anyway.
*
* Therefore we tell the guest we have 0 counters. Unfortunately, we
* must always support PMCCNTR (the cycle counter): we just RAZ/WI for
* all PM registers, which doesn't crash the guest kernel at least.
*
* Debug handling: We do trap most, if not all debug related system * Debug handling: We do trap most, if not all debug related system
* registers. The implementation is good enough to ensure that a guest * registers. The implementation is good enough to ensure that a guest
* can use these with minimal performance degradation. The drawback is * can use these with minimal performance degradation. The drawback is
...@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = { ...@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
/* ICC_SRE */ /* ICC_SRE */
{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
......
...@@ -337,6 +337,7 @@ ...@@ -337,6 +337,7 @@
*/ */
#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 #define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 #define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
#define E_ITS_MAPD_DEVICE_OOR 0x010801 #define E_ITS_MAPD_DEVICE_OOR 0x010801
#define E_ITS_MAPC_PROCNUM_OOR 0x010902 #define E_ITS_MAPC_PROCNUM_OOR 0x010902
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
static struct timecounter *timecounter; static struct timecounter *timecounter;
static struct workqueue_struct *wqueue; static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq; static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{ {
...@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
static void kvm_timer_init_interrupt(void *info) static void kvm_timer_init_interrupt(void *info)
{ {
enable_percpu_irq(host_vtimer_irq, 0); enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
} }
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
...@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void) ...@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void)
} }
host_vtimer_irq = info->virtual_irq; host_vtimer_irq = info->virtual_irq;
host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
host_vtimer_irq);
host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
}
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler, err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest timer", kvm_get_running_vcpus()); "kvm guest timer", kvm_get_running_vcpus());
if (err) { if (err) {
......
...@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid) ...@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL); irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
if (!irq) if (!irq)
return NULL; return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&irq->lpi_list); INIT_LIST_HEAD(&irq->lpi_list);
INIT_LIST_HEAD(&irq->ap_list); INIT_LIST_HEAD(&irq->ap_list);
...@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm, ...@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
* Find the target VCPU and the LPI number for a given devid/eventid pair * Find the target VCPU and the LPI number for a given devid/eventid pair
* and make this IRQ pending, possibly injecting it. * and make this IRQ pending, possibly injecting it.
* Must be called with the its_lock mutex held. * Must be called with the its_lock mutex held.
* Returns 0 on success, a positive error value for any ITS mapping
* related errors and negative error values for generic errors.
*/ */
static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
u32 devid, u32 eventid) u32 devid, u32 eventid)
{ {
struct kvm_vcpu *vcpu;
struct its_itte *itte; struct its_itte *itte;
if (!its->enabled) if (!its->enabled)
return; return -EBUSY;
itte = find_itte(its, devid, eventid); itte = find_itte(its, devid, eventid);
/* Triggering an unmapped IRQ gets silently dropped. */ if (!itte || !its_is_collection_mapped(itte->collection))
if (itte && its_is_collection_mapped(itte->collection)) { return E_ITS_INT_UNMAPPED_INTERRUPT;
struct kvm_vcpu *vcpu;
vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr); vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) { if (!vcpu)
return E_ITS_INT_UNMAPPED_INTERRUPT;
if (!vcpu->arch.vgic_cpu.lpis_enabled)
return -EBUSY;
spin_lock(&itte->irq->irq_lock); spin_lock(&itte->irq->irq_lock);
itte->irq->pending = true; itte->irq->pending = true;
vgic_queue_irq_unlock(kvm, itte->irq); vgic_queue_irq_unlock(kvm, itte->irq);
}
} return 0;
}
/*
 * Check whether a KVM I/O bus device is really an ITS MMIO frame.
 * Returns the enclosing vgic_io_device if so, or NULL when the device
 * either does not belong to the VGIC or is not of ITS type.
 */
static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *io_dev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	io_dev = container_of(dev, struct vgic_io_device, dev);

	return (io_dev->iodev_type == IODEV_ITS) ? io_dev : NULL;
}
/* /*
* Queries the KVM IO bus framework to get the ITS pointer from the given * Queries the KVM IO bus framework to get the ITS pointer from the given
* doorbell address. * doorbell address.
* We then call vgic_its_trigger_msi() with the decoded data. * We then call vgic_its_trigger_msi() with the decoded data.
* According to the KVM_SIGNAL_MSI API description returns 1 on success.
*/ */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi) int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{ {
u64 address; u64 address;
struct kvm_io_device *kvm_io_dev; struct kvm_io_device *kvm_io_dev;
struct vgic_io_device *iodev; struct vgic_io_device *iodev;
int ret;
if (!vgic_has_its(kvm)) if (!vgic_has_its(kvm))
return -ENODEV; return -ENODEV;
...@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi) ...@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address); kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
if (!kvm_io_dev) if (!kvm_io_dev)
return -ENODEV; return -EINVAL;
iodev = container_of(kvm_io_dev, struct vgic_io_device, dev); iodev = vgic_get_its_iodev(kvm_io_dev);
if (!iodev)
return -EINVAL;
mutex_lock(&iodev->its->its_lock); mutex_lock(&iodev->its->its_lock);
vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data); ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
mutex_unlock(&iodev->its->its_lock); mutex_unlock(&iodev->its->its_lock);
if (ret < 0)
return ret;
/*
* KVM_SIGNAL_MSI demands a return value > 0 for success and 0
* if the guest has blocked the MSI. So we map any LPI mapping
* related error to that.
*/
if (ret)
return 0; return 0;
else
return 1;
} }
/* Requires the its_lock to be held. */ /* Requires the its_lock to be held. */
...@@ -502,6 +539,7 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte) ...@@ -502,6 +539,7 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
list_del(&itte->itte_list); list_del(&itte->itte_list);
/* This put matches the get in vgic_add_lpi. */ /* This put matches the get in vgic_add_lpi. */
if (itte->irq)
vgic_put_irq(kvm, itte->irq); vgic_put_irq(kvm, itte->irq);
kfree(itte); kfree(itte);
...@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, ...@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
struct its_device *device; struct its_device *device;
struct its_collection *collection, *new_coll = NULL; struct its_collection *collection, *new_coll = NULL;
int lpi_nr; int lpi_nr;
struct vgic_irq *irq;
device = find_its_device(its, device_id); device = find_its_device(its, device_id);
if (!device) if (!device)
...@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, ...@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser)) lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
return E_ITS_MAPTI_PHYSICALID_OOR; return E_ITS_MAPTI_PHYSICALID_OOR;
/* If there is an existing mapping, behavior is UNPREDICTABLE. */
if (find_itte(its, device_id, event_id))
return 0;
collection = find_collection(its, coll_id); collection = find_collection(its, coll_id);
if (!collection) { if (!collection) {
int ret = vgic_its_alloc_collection(its, &collection, coll_id); int ret = vgic_its_alloc_collection(its, &collection, coll_id);
...@@ -718,8 +761,6 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, ...@@ -718,8 +761,6 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
new_coll = collection; new_coll = collection;
} }
itte = find_itte(its, device_id, event_id);
if (!itte) {
itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL); itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
if (!itte) { if (!itte) {
if (new_coll) if (new_coll)
...@@ -729,11 +770,19 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, ...@@ -729,11 +770,19 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
itte->event_id = event_id; itte->event_id = event_id;
list_add_tail(&itte->itte_list, &device->itt_head); list_add_tail(&itte->itte_list, &device->itt_head);
}
itte->collection = collection; itte->collection = collection;
itte->lpi = lpi_nr; itte->lpi = lpi_nr;
itte->irq = vgic_add_lpi(kvm, lpi_nr);
irq = vgic_add_lpi(kvm, lpi_nr);
if (IS_ERR(irq)) {
if (new_coll)
vgic_its_free_collection(its, coll_id);
its_free_itte(kvm, itte);
return PTR_ERR(irq);
}
itte->irq = irq;
update_affinity_itte(kvm, itte); update_affinity_itte(kvm, itte);
/* /*
...@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its, ...@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
u32 msi_data = its_cmd_get_id(its_cmd); u32 msi_data = its_cmd_get_id(its_cmd);
u64 msi_devid = its_cmd_get_deviceid(its_cmd); u64 msi_devid = its_cmd_get_deviceid(its_cmd);
vgic_its_trigger_msi(kvm, its, msi_devid, msi_data); return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
return 0;
} }
/* /*
...@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu) ...@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu)
its_sync_lpi_pending_table(vcpu); its_sync_lpi_pending_table(vcpu);
} }
static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its) static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{ {
struct vgic_io_device *iodev = &its->iodev; struct vgic_io_device *iodev = &its->iodev;
int ret; int ret;
if (its->initialized) if (!its->initialized)
return 0; return -EBUSY;
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
return -ENXIO; return -ENXIO;
...@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its) ...@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
KVM_VGIC_V3_ITS_SIZE, &iodev->dev); KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
if (!ret)
its->initialized = true;
return ret; return ret;
} }
...@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev, ...@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev,
if (type != KVM_VGIC_ITS_ADDR_TYPE) if (type != KVM_VGIC_ITS_ADDR_TYPE)
return -ENODEV; return -ENODEV;
if (its->initialized)
return -EBUSY;
if (copy_from_user(&addr, uaddr, sizeof(addr))) if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT; return -EFAULT;
...@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev, ...@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev,
case KVM_DEV_ARM_VGIC_GRP_CTRL: case KVM_DEV_ARM_VGIC_GRP_CTRL:
switch (attr->attr) { switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT: case KVM_DEV_ARM_VGIC_CTRL_INIT:
return vgic_its_init_its(dev->kvm, its); its->initialized = true;
return 0;
} }
break; break;
} }
...@@ -1498,3 +1541,30 @@ int kvm_vgic_register_its_device(void) ...@@ -1498,3 +1541,30 @@ int kvm_vgic_register_its_device(void)
return kvm_register_device_ops(&kvm_arm_vgic_its_ops, return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
KVM_DEV_TYPE_ARM_VGIC_ITS); KVM_DEV_TYPE_ARM_VGIC_ITS);
} }
/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		/* Skip anything on the VM device list that is not an ITS. */
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		/*
		 * We don't need to care about tearing down previously
		 * registered ITSes, as the kvm_io_bus framework removes
		 * them for us if the VM gets destroyed.
		 */
		if (ret)
			break;
	}

	return ret;
}
...@@ -306,16 +306,19 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu, ...@@ -306,16 +306,19 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
{ {
struct vgic_dist *dist = &vcpu->kvm->arch.vgic; struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u64 propbaser = dist->propbaser; u64 old_propbaser, propbaser;
/* Storing a value with LPIs already enabled is undefined */ /* Storing a value with LPIs already enabled is undefined */
if (vgic_cpu->lpis_enabled) if (vgic_cpu->lpis_enabled)
return; return;
do {
old_propbaser = dist->propbaser;
propbaser = old_propbaser;
propbaser = update_64bit_reg(propbaser, addr & 4, len, val); propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
propbaser = vgic_sanitise_propbaser(propbaser); propbaser = vgic_sanitise_propbaser(propbaser);
} while (cmpxchg64(&dist->propbaser, old_propbaser,
dist->propbaser = propbaser; propbaser) != old_propbaser);
} }
static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu, static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
...@@ -331,16 +334,19 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu, ...@@ -331,16 +334,19 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
unsigned long val) unsigned long val)
{ {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u64 pendbaser = vgic_cpu->pendbaser; u64 old_pendbaser, pendbaser;
/* Storing a value with LPIs already enabled is undefined */ /* Storing a value with LPIs already enabled is undefined */
if (vgic_cpu->lpis_enabled) if (vgic_cpu->lpis_enabled)
return; return;
do {
old_pendbaser = vgic_cpu->pendbaser;
pendbaser = old_pendbaser;
pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val); pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
pendbaser = vgic_sanitise_pendbaser(pendbaser); pendbaser = vgic_sanitise_pendbaser(pendbaser);
} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
vgic_cpu->pendbaser = pendbaser; pendbaser) != old_pendbaser);
} }
/* /*
......
...@@ -289,6 +289,14 @@ int vgic_v3_map_resources(struct kvm *kvm) ...@@ -289,6 +289,14 @@ int vgic_v3_map_resources(struct kvm *kvm)
goto out; goto out;
} }
if (vgic_has_its(kvm)) {
ret = vgic_register_its_iodevs(kvm);
if (ret) {
kvm_err("Unable to register VGIC ITS MMIO regions\n");
goto out;
}
}
dist->ready = true; dist->ready = true;
out: out:
......
...@@ -117,17 +117,17 @@ static void vgic_irq_release(struct kref *ref) ...@@ -117,17 +117,17 @@ static void vgic_irq_release(struct kref *ref)
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{ {
struct vgic_dist *dist; struct vgic_dist *dist = &kvm->arch.vgic;
if (irq->intid < VGIC_MIN_LPI) if (irq->intid < VGIC_MIN_LPI)
return; return;
if (!kref_put(&irq->refcount, vgic_irq_release)) spin_lock(&dist->lpi_list_lock);
if (!kref_put(&irq->refcount, vgic_irq_release)) {
spin_unlock(&dist->lpi_list_lock);
return; return;
};
dist = &kvm->arch.vgic;
spin_lock(&dist->lpi_list_lock);
list_del(&irq->lpi_list); list_del(&irq->lpi_list);
dist->lpi_list_count--; dist->lpi_list_count--;
spin_unlock(&dist->lpi_list_lock); spin_unlock(&dist->lpi_list_lock);
......
...@@ -84,6 +84,7 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu); ...@@ -84,6 +84,7 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info); int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm); int vgic_v3_map_resources(struct kvm *kvm);
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
int vgic_register_its_iodevs(struct kvm *kvm);
bool vgic_has_its(struct kvm *kvm); bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void); int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu); void vgic_enable_lpis(struct kvm_vcpu *vcpu);
...@@ -140,6 +141,11 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm, ...@@ -140,6 +141,11 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm,
return -ENODEV; return -ENODEV;
} }
/*
 * Stub used when ITS support is compiled out: report that no such
 * device exists so callers can fail gracefully.
 */
static inline int vgic_register_its_iodevs(struct kvm *kvm)
{
return -ENODEV;
}
static inline bool vgic_has_its(struct kvm *kvm) static inline bool vgic_has_its(struct kvm *kvm)
{ {
return false; return false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment