Commit 26f31498 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-6.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.4, take #2

- Address some fallout of the locking rework, this time affecting
  the way the vgic is configured

- Fix an issue where the page table walker frees a subtree and
  then proceeds with walking what it has just freed...

- Check that a given PA donated to the guest is actually memory
  (only affecting pKVM)

- Correctly handle MTE CMOs by Set/Way

parents b9846a69 a9f0e3d5
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -632,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
  *
  * The walker will walk the page-table entries corresponding to the input
  * address range specified, visiting entries according to the walker flags.
- * Invalid entries are treated as leaf entries. Leaf entries are reloaded
- * after invoking the walker callback, allowing the walker to descend into
- * a newly installed table.
+ * Invalid entries are treated as leaf entries. The visited page table entry is
+ * reloaded after invoking the walker callback, allowing the walker to descend
+ * into a newly installed table.
  *
  * Returning a negative error code from the walker callback function will
  * terminate the walk immediately with the same error code.
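
For context, a walker is driven through kvm_pgtable_walk() with a callback
and a set of flags. A minimal sketch of the callback shape this comment
describes (illustrative only, not part of this series; assumes the 6.4-era
visitor signature and the walker fields shown elsewhere in this diff):

static int count_leaves(const struct kvm_pgtable_visit_ctx *ctx,
			enum kvm_pgtable_walk_flags visit)
{
	u64 *count = ctx->arg;

	/* Called once per visited entry; only valid leaves are counted. */
	if (kvm_pte_valid(ctx->old))
		(*count)++;

	return 0;
}

static int count_valid_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      u64 *count)
{
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaves,
		.arg	= count,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}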
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -115,8 +115,14 @@
 #define SB_BARRIER_INSN			__SYS_BARRIER_INSN(0, 7, 31)

 #define SYS_DC_ISW			sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW			sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW			sys_insn(1, 0, 7, 6, 6)
 #define SYS_DC_CSW			sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW			sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW			sys_insn(1, 0, 7, 10, 6)
 #define SYS_DC_CISW			sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW			sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW			sys_insn(1, 0, 7, 14, 6)

 /*
  * Automatically generated definitions for system registers, the
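
The sys_insn() arguments are the (op0, op1, CRn, CRm, op2) encoding of the
corresponding DC instruction, so SYS_DC_CIGDSW, for example, names
DC CIGDSW, Xt. A sketch of how such an op is issued (assumes MTE-capable
hardware; building the Set/Way operand per the ARM ARM is elided):

/* Clean+invalidate data and allocation tags by set/way. "setway" is a
 * hypothetical, pre-built Set/Way operand (level, set and way fields). */
static inline void dc_cigdsw(unsigned long setway)
{
	asm volatile("dc cigdsw, %0" : : "r" (setway) : "memory");
}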
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -575,7 +575,7 @@ struct pkvm_mem_donation {

 struct check_walk_data {
 	enum pkvm_page_state	desired;
-	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);
+	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte, u64 addr);
 };

 static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
@@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
 {
 	struct check_walk_data *d = ctx->arg;

-	if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
-		return -EINVAL;
-
-	return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
+	return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
 }

 static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	return kvm_pgtable_walk(pgt, addr, size, &walker);
 }

-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
 {
+	if (!addr_is_allowed_memory(addr))
+		return PKVM_NOPAGE;
+
 	if (!kvm_pte_valid(pte) && pte)
 		return PKVM_NOPAGE;

@@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx
 	return host_stage2_set_owner_locked(addr, size, host_id);
 }

-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
 {
 	if (!kvm_pte_valid(pte))
 		return PKVM_NOPAGE;
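
For reference, these get_page_state() callbacks are consumed through
check_page_state_range(); a condensed sketch of the host-side wiring,
modelled on the existing __host_check_page_state_range() helper (details
elided):

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= host_get_page_state,
	};

	/* The host stage-2 lock must already be held by the caller. */
	hyp_assert_lock_held(&host_mmu.lock);
	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
}

With the PA check folded into host_get_page_state(), a non-memory PA now
reads as PKVM_NOPAGE instead of failing the whole walk with -EINVAL.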
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -209,14 +209,26 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		.flags	= flags,
 	};
 	int ret = 0;
+	bool reload = false;
 	kvm_pteref_t childp;
 	bool table = kvm_pte_table(ctx.old, level);

-	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
+	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
 		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
+		reload = true;
+	}

 	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
 		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
+		reload = true;
+	}
+
+	/*
+	 * Reload the page table after invoking the walker callback for leaf
+	 * entries or after pre-order traversal, to allow the walker to descend
+	 * into a newly installed or replaced table.
+	 */
+	if (reload) {
 		ctx.old = READ_ONCE(*ptep);
 		table = kvm_pte_table(ctx.old, level);
 	}
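
A sketch of the failure mode this hunk addresses: a TABLE_PRE visitor that
tears down a subtree and invalidates the entry. Without the reload, the
walker would still see the stale table PTE and descend into just-freed
pages (stage2_free_subtree() is a hypothetical stand-in, not a real
helper):

static int teardown_visitor(const struct kvm_pgtable_visit_ctx *ctx,
			    enum kvm_pgtable_walk_flags visit)
{
	if (visit == KVM_PGTABLE_WALK_TABLE_PRE) {
		stage2_free_subtree(ctx);	/* hypothetical teardown */
		WRITE_ONCE(*ctx->ptep, 0);	/* invalidate the entry */
	}

	return 0;
}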
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 	return true;
 }

+static bool access_dcgsw(struct kvm_vcpu *vcpu,
+			 struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	if (!kvm_has_mte(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
+	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
+	return access_dcsw(vcpu, p, r);
+}
+
 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
 {
 	switch (r->aarch32_map) {
@@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
  */
 static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

 	DBG_BCR_BVR_WCR_WVR_EL1(0),
 	DBG_BCR_BVR_WCR_WVR_EL1(1),
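
kvm_has_mte() is only true once userspace has opted the VM into MTE, so
these S/W ops remain UNDEF for non-MTE guests. A minimal sketch of the
opt-in from a VMM (error handling elided; vm_fd is an assumed, already
open VM file descriptor):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Must be done before any vCPUs are created. */
static int enable_vm_mte(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_MTE };

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}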
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		mutex_lock(&vcpu->kvm->arch.config_lock);
+		mutex_lock(&vcpu->kvm->slots_lock);
 		ret = vgic_register_redist_iodev(vcpu);
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
+		mutex_unlock(&vcpu->kvm->slots_lock);
 	}

 	return ret;
 }
@@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
- * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
+ * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
  * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
  * @kvm: kvm struct pointer
  */
@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
 int kvm_vgic_map_resources(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	gpa_t dist_base;
 	int ret = 0;

 	if (likely(vgic_ready(kvm)))
 		return 0;

+	mutex_lock(&kvm->slots_lock);
 	mutex_lock(&kvm->arch.config_lock);
 	if (vgic_ready(kvm))
 		goto out;
@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	else
 		ret = vgic_v3_map_resources(kvm);

-	if (ret)
+	if (ret) {
 		__kvm_vgic_destroy(kvm);
-	else
-		dist->ready = true;
+		goto out;
+	}
+	dist->ready = true;
+	dist_base = dist->vgic_dist_base;
+	mutex_unlock(&kvm->arch.config_lock);
+
+	ret = vgic_register_dist_iodev(kvm, dist_base,
+				       kvm_vgic_global_state.type);
+	if (ret) {
+		kvm_err("Unable to register VGIC dist MMIO regions\n");
+		kvm_vgic_destroy(kvm);
+	}
+	mutex_unlock(&kvm->slots_lock);
+
+	return ret;

 out:
 	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return ret;
 }
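
Taken together, the vgic changes in this tag imply the following lock
hierarchy (a summary of this series, not a comment quoted from the tree):

/*
 * Outermost first:
 *
 *   kvm->slots_lock		MMIO bus (iodev) registration
 *     kvm->arch.config_lock	vgic configuration state
 *       its->cmd_lock
 *         its->its_lock
 *
 * config_lock can no longer be held across kvm_io_bus_register_dev(),
 * which is why kvm_vgic_map_resources() snapshots dist_base and drops
 * config_lock before registering the distributor iodev.
 */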
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
 static int vgic_its_create(struct kvm_device *dev, u32 type)
 {
+	int ret;
 	struct vgic_its *its;

 	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	if (!its)
 		return -ENOMEM;

+	mutex_lock(&dev->kvm->arch.config_lock);
+
 	if (vgic_initialized(dev->kvm)) {
-		int ret = vgic_v4_init(dev->kvm);
+		ret = vgic_v4_init(dev->kvm);
 		if (ret < 0) {
+			mutex_unlock(&dev->kvm->arch.config_lock);
 			kfree(its);
 			return ret;
 		}
@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	/* Yep, even more trickery for lock ordering... */
 #ifdef CONFIG_LOCKDEP
-	mutex_lock(&dev->kvm->arch.config_lock);
 	mutex_lock(&its->cmd_lock);
 	mutex_lock(&its->its_lock);
 	mutex_unlock(&its->its_lock);
 	mutex_unlock(&its->cmd_lock);
-	mutex_unlock(&dev->kvm->arch.config_lock);
 #endif

 	its->vgic_its_base = VGIC_ADDR_UNDEF;
@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 	dev->private = its;

-	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+
+	mutex_unlock(&dev->kvm->arch.config_lock);
+
+	return ret;
 }

 static void vgic_its_destroy(struct kvm_device *kvm_dev)
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 		if (get_user(addr, uaddr))
 			return -EFAULT;

-	mutex_lock(&kvm->arch.config_lock);
+	/*
+	 * Since we can't hold config_lock while registering the redistributor
+	 * iodevs, take the slots_lock immediately.
+	 */
+	mutex_lock(&kvm->slots_lock);
 	switch (attr->attr) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 	if (r)
 		goto out;

+	mutex_lock(&kvm->arch.config_lock);
 	if (write) {
 		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
 		if (!r)
@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
 	} else {
 		addr = *addr_ptr;
 	}
+	mutex_unlock(&kvm->arch.config_lock);

 out:
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->slots_lock);

 	if (!r && !write)
 		r = put_user(addr, uaddr);
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
 	struct vgic_redist_region *rdreg;
 	gpa_t rd_base;
-	int ret;
+	int ret = 0;
+
+	lockdep_assert_held(&kvm->slots_lock);
+	mutex_lock(&kvm->arch.config_lock);

 	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
-		return 0;
+		goto out_unlock;

 	/*
 	 * We may be creating VCPUs before having set the base address for the
@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	 */
 	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
 	if (!rdreg)
-		return 0;
+		goto out_unlock;

-	if (!vgic_v3_check_base(kvm))
-		return -EINVAL;
+	if (!vgic_v3_check_base(kvm)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}

 	vgic_cpu->rdreg = rdreg;
 	vgic_cpu->rdreg_index = rdreg->free_index;
@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
 	rd_dev->redist_vcpu = vcpu;

-	mutex_lock(&kvm->slots_lock);
+	mutex_unlock(&kvm->arch.config_lock);
+
 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
 				      2 * SZ_64K, &rd_dev->dev);
-	mutex_unlock(&kvm->slots_lock);
 	if (ret)
 		return ret;

+	/* Protected by slots_lock */
 	rdreg->free_index++;
 	return 0;
+
+out_unlock:
+	mutex_unlock(&kvm->arch.config_lock);
+	return ret;
 }

 static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 		/* The current c failed, so iterate over the previous ones. */
 		int i;

-		mutex_lock(&kvm->slots_lock);
 		for (i = 0; i < c; i++) {
 			vcpu = kvm_get_vcpu(kvm, i);
 			vgic_unregister_redist_iodev(vcpu);
 		}
-		mutex_unlock(&kvm->slots_lock);
 	}

 	return ret;
@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 {
 	int ret;

+	mutex_lock(&kvm->arch.config_lock);
 	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
+	mutex_unlock(&kvm->arch.config_lock);
 	if (ret)
 		return ret;

@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 	if (ret) {
 		struct vgic_redist_region *rdreg;

+		mutex_lock(&kvm->arch.config_lock);
 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
 		vgic_v3_free_redist_region(rdreg);
+		mutex_unlock(&kvm->arch.config_lock);
 		return ret;
 	}
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 			     enum vgic_type type)
 {
 	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
-	int ret = 0;
 	unsigned int len;

 	switch (type) {
@@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 	io_device->iodev_type = IODEV_DIST;
 	io_device->redist_vcpu = NULL;

-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
-				      len, &io_device->dev);
-	mutex_unlock(&kvm->slots_lock);
-
-	return ret;
+	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+				       len, &io_device->dev);
 }
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
 		return ret;
 	}

-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
-	if (ret) {
-		kvm_err("Unable to register VGIC MMIO regions\n");
-		return ret;
-	}
-
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
-	int ret = 0;
 	unsigned long c;

 	kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 		return -EBUSY;
 	}

-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
-	if (ret) {
-		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-		return ret;
-	}
-
 	if (kvm_vgic_global_state.has_gicv4_1)
 		vgic_v4_configure_vsgis(kvm);
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
 	}
 }

-/* Must be called with the kvm lock held */
 void vgic_v4_configure_vsgis(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
 	unsigned long i;

+	lockdep_assert_held(&kvm->arch.config_lock);
+
 	kvm_arm_halt_guest(kvm);

 	kvm_for_each_vcpu(i, vcpu, kvm) {