Commit f0032773 authored by Oliver Upton, committed by Marc Zyngier

KVM: arm64: Use config_lock to protect vgic state

Almost all of the vgic state is VM-scoped but accessed from the context
of a vCPU. These accesses were serialized on the kvm->lock which cannot
be nested within a vcpu->mutex critical section.

Move the vgic state over to the config_lock. Tweak the lock
ordering where necessary to ensure that the config_lock is acquired
after the vcpu->mutex. Acquire the config_lock in kvm_vgic_create() to
avoid a race between the converted flows and GIC creation. Where
necessary, continue to acquire kvm->lock to avoid a race with vCPU
creation (i.e. flows that use lock_all_vcpus()).

Finally, promote the locking expectations in comments to lockdep
assertions and update the locking documentation for the config_lock as
well as vcpu->mutex.

Cc: stable@vger.kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230327164747.2466958-5-oliver.upton@linux.dev
parent 4bba7f7d
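
For illustration, here is a minimal sketch of the nesting order this patch establishes; the helper name and body are hypothetical and not part of the patch:

/*
 * Illustrative only (not from the patch): the lock nesting order the
 * commit message describes, with config_lock taken inside vcpu->mutex.
 *
 *	kvm->lock  ->  vcpu->mutex  ->  kvm->arch.config_lock
 */
static void example_vgic_config_update(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	mutex_lock(&kvm->lock);			/* outermost: VM-wide lock */
	mutex_lock(&vcpu->mutex);		/* vCPU lock nests inside kvm->lock */
	mutex_lock(&kvm->arch.config_lock);	/* innermost: protects VM-scoped vgic state */

	/* ... read or modify VM-scoped vgic state here ... */

	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
	mutex_unlock(&kvm->lock);
}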
arch/arm64/kvm/vgic/vgic-debug.c
@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 	struct kvm *kvm = s->private;
 	struct vgic_state_iter *iter;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	iter = kvm->arch.vgic.iter;
 	if (iter) {
 		iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 	if (end_of_vgic(iter))
 		iter = NULL;
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 	return iter;
 }
@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
 	if (IS_ERR(v))
 		return;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	iter = kvm->arch.vgic.iter;
 	kfree(iter->lpi_array);
 	kfree(iter);
 	kvm->arch.vgic.iter = NULL;
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 }
 
 static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
arch/arm64/kvm/vgic/vgic-init.c
@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	unsigned long i;
 	int ret;
 
-	if (irqchip_in_kernel(kvm))
-		return -EEXIST;
-
 	/*
 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
 	 * which had no chance yet to check the availability of the GICv2
@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	    !kvm_vgic_global_state.can_emulate_gicv2)
 		return -ENODEV;
 
+	/* Must be held to avoid race with vCPU creation */
+	lockdep_assert_held(&kvm->lock);
+
 	ret = -EBUSY;
 	if (!lock_all_vcpus(kvm))
 		return ret;
 
+	mutex_lock(&kvm->arch.config_lock);
+
+	if (irqchip_in_kernel(kvm)) {
+		ret = -EEXIST;
+		goto out_unlock;
+	}
+
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
+	mutex_unlock(&kvm->arch.config_lock);
 	unlock_all_vcpus(kvm);
 	return ret;
 }
@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		mutex_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->arch.config_lock);
 		ret = vgic_register_redist_iodev(vcpu);
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
 	}
 
 	return ret;
 }
@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
  * The function is generally called when nr_spis has been explicitly set
  * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
  * vgic_initialized() returns true when this function has succeeded.
- * Must be called with kvm->lock held!
  */
 int vgic_init(struct kvm *kvm)
 {
@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
 	int ret = 0, i;
 	unsigned long idx;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (vgic_initialized(kvm))
 		return 0;
@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
 }
 
-/* To be called with kvm->lock held */
 static void __kvm_vgic_destroy(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	vgic_debug_destroy(kvm);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
 void kvm_vgic_destroy(struct kvm *kvm)
 {
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	__kvm_vgic_destroy(kvm);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 }
 
 /**
@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
 			return -EBUSY;
 
-		mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->arch.config_lock);
 		ret = vgic_init(kvm);
-		mutex_unlock(&kvm->lock);
+		mutex_unlock(&kvm->arch.config_lock);
 	}
 
 	return ret;
@@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	if (likely(vgic_ready(kvm)))
 		return 0;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	if (vgic_ready(kvm))
 		goto out;
 
@@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 	return ret;
 }
arch/arm64/kvm/vgic/vgic-its.c
@@ -2045,6 +2045,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
+	if (!lock_all_vcpus(dev->kvm)) {
+		mutex_unlock(&dev->kvm->lock);
+		return -EBUSY;
+	}
+
+	mutex_lock(&dev->kvm->arch.config_lock);
+
 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
 		ret = -ENXIO;
 		goto out;
@@ -2058,11 +2065,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 		goto out;
 	}
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	addr = its->vgic_its_base + offset;
 
 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2076,8 +2078,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 	} else {
 		*reg = region->its_read(dev->kvm, its, addr, len);
 	}
-	unlock_all_vcpus(dev->kvm);
 out:
+	mutex_unlock(&dev->kvm->arch.config_lock);
+	unlock_all_vcpus(dev->kvm);
 	mutex_unlock(&dev->kvm->lock);
 	return ret;
 }
@@ -2757,6 +2760,8 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		return -EBUSY;
 	}
 
+	mutex_lock(&kvm->arch.config_lock);
+
 	switch (attr) {
 	case KVM_DEV_ARM_ITS_CTRL_RESET:
 		vgic_its_reset(kvm, its);
@@ -2769,6 +2774,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 		break;
 	}
 
+	mutex_unlock(&kvm->arch.config_lock);
 	unlock_all_vcpus(kvm);
 	mutex_unlock(&its->its_lock);
 	mutex_unlock(&kvm->lock);
arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	int r;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
 		r = -ENODEV;
 	}
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	return r;
 }
@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 	if (get_user(addr, uaddr))
 		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.config_lock);
 	switch (attr->attr) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 	}
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.config_lock);
 
 	if (!r && !write)
 		r = put_user(addr, uaddr);
@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 			    (val & 31))
 				return -EINVAL;
 
-			mutex_lock(&dev->kvm->lock);
+			mutex_lock(&dev->kvm->arch.config_lock);
 
 			if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
 				ret = -EBUSY;
@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 				dev->kvm->arch.vgic.nr_spis =
 					val - VGIC_NR_PRIVATE_IRQS;
 
-			mutex_unlock(&dev->kvm->lock);
+			mutex_unlock(&dev->kvm->arch.config_lock);
 
 			return ret;
 		}
 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
 		switch (attr->attr) {
 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
-			mutex_lock(&dev->kvm->lock);
+			mutex_lock(&dev->kvm->arch.config_lock);
 			r = vgic_init(dev->kvm);
-			mutex_unlock(&dev->kvm->lock);
+			mutex_unlock(&dev->kvm->arch.config_lock);
 			return r;
 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
 			/*
@@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
 				mutex_unlock(&dev->kvm->lock);
 				return -EBUSY;
 			}
+
+			mutex_lock(&dev->kvm->arch.config_lock);
 			r = vgic_v3_save_pending_tables(dev->kvm);
+			mutex_unlock(&dev->kvm->arch.config_lock);
 			unlock_all_vcpus(dev->kvm);
 			mutex_unlock(&dev->kvm->lock);
 			return r;
@@ -411,15 +414,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
+	if (!lock_all_vcpus(dev->kvm)) {
+		mutex_unlock(&dev->kvm->lock);
+		return -EBUSY;
+	}
+
+	mutex_lock(&dev->kvm->arch.config_lock);
+
 	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;
 
-	if (!lock_all_vcpus(dev->kvm)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -432,8 +437,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
-	unlock_all_vcpus(dev->kvm);
 out:
+	mutex_unlock(&dev->kvm->arch.config_lock);
+	unlock_all_vcpus(dev->kvm);
 	mutex_unlock(&dev->kvm->lock);
 
 	if (!ret && !is_write)
@@ -569,12 +575,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	if (unlikely(!vgic_initialized(dev->kvm))) {
-		ret = -EBUSY;
-		goto out;
+	if (!lock_all_vcpus(dev->kvm)) {
+		mutex_unlock(&dev->kvm->lock);
+		return -EBUSY;
 	}
 
-	if (!lock_all_vcpus(dev->kvm)) {
+	mutex_lock(&dev->kvm->arch.config_lock);
+
+	if (unlikely(!vgic_initialized(dev->kvm))) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -609,8 +617,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
-	unlock_all_vcpus(dev->kvm);
 out:
+	mutex_unlock(&dev->kvm->arch.config_lock);
+	unlock_all_vcpus(dev->kvm);
 	mutex_unlock(&dev->kvm->lock);
 
 	if (!ret && uaccess && !is_write) {
arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 	case GICD_CTLR: {
 		bool was_enabled, is_hwsgi;
 
-		mutex_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->arch.config_lock);
 
 		was_enabled = dist->enabled;
 		is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 		else if (!was_enabled && dist->enabled)
 			vgic_kick_vcpus(vcpu->kvm);
 
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
 		break;
 	}
 	case GICD_TYPER:
arch/arm64/kvm/vgic/vgic-mmio.c
@@ -530,13 +530,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	u32 val;
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	val = __vgic_mmio_read_active(vcpu, addr, len);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 
 	return val;
 }
@@ -625,13 +625,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 }
 
 int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -662,13 +662,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.config_lock);
 	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
 
 	vgic_access_active_finish(vcpu, intid);
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.config_lock);
 }
 
 int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
arch/arm64/kvm/vgic/vgic-v4.c
@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
  * @kvm:	Pointer to the VM being initialized
  *
  * We may be called each time a vITS is created, or when the
- * vgic is initialized. This relies on kvm->lock to be
- * held. In both cases, the number of vcpus should now be
- * fixed.
+ * vgic is initialized. In both cases, the number of vcpus
+ * should now be fixed.
  */
 int vgic_v4_init(struct kvm *kvm)
 {
@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
 	int nr_vcpus, ret;
 	unsigned long i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (!kvm_vgic_global_state.has_gicv4)
 		return 0; /* Nothing to see here... move along. */
@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
 /**
  * vgic_v4_teardown - Free the GICv4 data structures
  * @kvm:	Pointer to the VM being destroyed
- *
- * Relies on kvm->lock to be held.
  */
 void vgic_v4_teardown(struct kvm *kvm)
 {
 	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
 	int i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	if (!its_vm->vpes)
 		return;
arch/arm64/kvm/vgic/vgic.c
@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 /*
  * Locking order is always:
  * kvm->lock (mutex)
- *   its->cmd_lock (mutex)
- *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
- *       kvm->lpi_list_lock		must be taken with IRQs disabled
- *       vgic_irq->irq_lock		must be taken with IRQs disabled
+ *   vcpu->mutex (mutex)
+ *     kvm->arch.config_lock (mutex)
+ *       its->cmd_lock (mutex)
+ *         its->its_lock (mutex)
+ *           vgic_cpu->ap_list_lock	must be taken with IRQs disabled
+ *           kvm->lpi_list_lock		must be taken with IRQs disabled
+ *           vgic_irq->irq_lock		must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower
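
For reference, the converted device ioctl paths in this patch (e.g. vgic_its_attr_regs_access() and vgic_v2_attr_regs_access() above) all converge on the same acquisition pattern. A condensed, hypothetical sketch, not taken verbatim from the patch:

/*
 * Illustrative only: the common shape of the converted ioctl paths.
 * lock_all_vcpus() takes every vcpu->mutex, so config_lock is acquired
 * after the vcpu mutexes, matching the documented ordering.
 */
static int example_vgic_device_ioctl(struct kvm *kvm)
{
	int ret = 0;

	mutex_lock(&kvm->lock);			/* still needed: avoid race with vCPU creation */
	if (!lock_all_vcpus(kvm)) {		/* acquire every vcpu->mutex */
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	mutex_lock(&kvm->arch.config_lock);	/* taken after the vcpu mutexes */

	/* ... access VM-scoped vgic state ... */

	mutex_unlock(&kvm->arch.config_lock);
	unlock_all_vcpus(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}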