Commit c90aad55 authored by Marc Zyngier

Merge branch 'kvm-arm64/vgic-5.13' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents d8f37d29 94ac0835
@@ -228,7 +228,7 @@ Groups:
   KVM_DEV_ARM_VGIC_CTRL_INIT
     request the initialization of the VGIC, no additional parameter in
-    kvm_device_attr.addr.
+    kvm_device_attr.addr. Must be called after all VCPUs have been created.
   KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES
     save all LPI pending bits into guest RAM pending tables.
...
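For reference, the ordering requirement documented above maps onto the device-attribute API roughly as follows. This is a minimal userspace sketch (building on arm64, with vgic_fd assumed to come from a prior KVM_CREATE_DEVICE call for KVM_DEV_TYPE_ARM_VGIC_V3); it is not part of this series:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch only: KVM_DEV_ARM_VGIC_CTRL_INIT takes no payload, so .addr is
 * left at zero. Per the documentation change above, call this only once
 * every vCPU has been created with KVM_CREATE_VCPU.
 */
static int vgic_init_after_all_vcpus(int vgic_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}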
@@ -335,13 +335,14 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 	kfree(dist->spis);
 	dist->spis = NULL;
 	dist->nr_spis = 0;
+	dist->vgic_dist_base = VGIC_ADDR_UNDEF;
 
-	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
-			list_del(&rdreg->list);
-			kfree(rdreg);
-		}
+	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
+			vgic_v3_free_redist_region(rdreg);
 		INIT_LIST_HEAD(&dist->rd_regions);
+	} else {
+		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
 	}
 
 	if (vgic_has_its(kvm))
@@ -362,6 +363,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	vgic_flush_pending_lpis(vcpu);
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
 }
 
 /* To be called with kvm->lock held */
...
@@ -87,7 +87,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
 			goto out;
 		}
-		rdreg = list_first_entry(&vgic->rd_regions,
-					 struct vgic_redist_region, list);
+		rdreg = list_first_entry_or_null(&vgic->rd_regions,
+						 struct vgic_redist_region, list);
 		if (!rdreg)
 			addr_ptr = &undef_value;
@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
 		u64 addr;
 		unsigned long type = (unsigned long)attr->attr;
 
+		if (copy_from_user(&addr, uaddr, sizeof(addr)))
+			return -EFAULT;
+
 		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
 		if (r)
 			return (r == -ENODEV) ? -ENXIO : r;
...
@@ -251,29 +251,34 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
 		vgic_enable_lpis(vcpu);
 }
 
-static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
-					      gpa_t addr, unsigned int len)
+static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
 {
-	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
-	int target_vcpu_id = vcpu->vcpu_id;
-	gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
-			(rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
-	u64 value;
+	struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;
 
-	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
-	value |= ((target_vcpu_id & 0xffff) << 8);
+	if (!rdreg)
+		return false;
 
-	if (addr == last_rdist_typer)
-		value |= GICR_TYPER_LAST;
-	if (vgic_has_its(vcpu->kvm))
-		value |= GICR_TYPER_PLPIS;
+	if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
+		return false;
+	} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
+		struct list_head *rd_regions = &vgic->rd_regions;
+		gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
 
-	return extract_bytes(value, addr & 7, len);
+		/*
+		 * the rdist is the last one of the redist region,
+		 * check whether there is no other contiguous rdist region
+		 */
+		list_for_each_entry(iter, rd_regions, list) {
+			if (iter->base == end && iter->free_index > 0)
+				return false;
+		}
+	}
+	return true;
 }
 
-static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
-						 gpa_t addr, unsigned int len)
+static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+					      gpa_t addr, unsigned int len)
 {
 	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
@@ -286,7 +291,9 @@ static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
 	if (vgic_has_its(vcpu->kvm))
 		value |= GICR_TYPER_PLPIS;
 
-	/* reporting of the Last bit is not supported for userspace */
+	if (vgic_mmio_vcpu_rdist_is_last(vcpu))
+		value |= GICR_TYPER_LAST;
+
 	return extract_bytes(value, addr & 7, len);
 }
@@ -612,7 +619,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
 		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
-		vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
+		NULL, vgic_mmio_uaccess_write_wi, 8,
 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
 		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
@@ -714,6 +721,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	vgic_cpu->rdreg = rdreg;
+	vgic_cpu->rdreg_index = rdreg->free_index;
 
 	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
@@ -768,7 +776,7 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 }
 
 /**
- * vgic_v3_insert_redist_region - Insert a new redistributor region
+ * vgic_v3_alloc_redist_region - Allocate a new redistributor region
  *
  * Performs various checks before inserting the rdist region in the list.
  * Those tests depend on whether the size of the rdist region is known
@@ -782,7 +790,7 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
  *
  * Return 0 on success, < 0 otherwise
  */
-static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
-					gpa_t base, uint32_t count)
+static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
+				       gpa_t base, uint32_t count)
 {
 	struct vgic_dist *d = &kvm->arch.vgic;
@@ -791,10 +799,6 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
 	size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
 	int ret;
 
-	/* single rdist region already set ?*/
-	if (!count && !list_empty(rd_regions))
-		return -EINVAL;
-
 	/* cross the end of memory ? */
 	if (base + size < base)
 		return -EINVAL;
@@ -805,11 +809,15 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
 	} else {
 		rdreg = list_last_entry(rd_regions,
 					struct vgic_redist_region, list);
-		if (index != rdreg->index + 1)
-			return -EINVAL;
 
-		/* Cannot add an explicitly sized regions after legacy region */
-		if (!rdreg->count)
+		/* Don't mix single region and discrete redist regions */
+		if (!count && rdreg->count)
+			return -EINVAL;
+
+		if (!count)
+			return -EEXIST;
+
+		if (index != rdreg->index + 1)
 			return -EINVAL;
 	}
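For orientation, the index and count values checked above are the ones userspace packs into the 64-bit KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attribute. The bit layout assumed below (count in bits [63:52], base address bits [51:16], flags [15:12], index [11:0]) is taken from the arm-vgic-v3 device documentation; the helper itself is illustrative and not code from this series:

#include <stdint.h>

/*
 * Illustrative helper: pack a KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION value.
 * Assumed layout: [63:52] count, [51:16] base (64KiB-aligned GPA),
 * [15:12] flags (zero), [11:0] region index.
 */
static inline uint64_t redist_region_attr(uint64_t base_gpa, uint32_t count,
					  uint32_t index)
{
	return ((uint64_t)count << 52) |
	       (base_gpa & 0x000fffffffff0000ULL) |	/* bits [51:16] */
	       (index & 0xfffULL);
}

Under the reworked checks, registering the legacy (count == 0) region a second time now fails with -EEXIST rather than -EINVAL, while requesting the legacy region when discrete regions already exist keeps failing with -EINVAL.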
@@ -848,11 +856,17 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
 	return ret;
 }
 
+void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+{
+	list_del(&rdreg->list);
+	kfree(rdreg);
+}
+
 int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 {
 	int ret;
 
-	ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
+	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
 	if (ret)
 		return ret;
@@ -861,8 +875,13 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 	 * afterwards will register the iodevs when needed.
 	 */
 	ret = vgic_register_all_redist_iodevs(kvm);
-	if (ret)
+	if (ret) {
+		struct vgic_redist_region *rdreg;
+
+		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
+		vgic_v3_free_redist_region(rdreg);
 		return ret;
+	}
 
 	return 0;
 }
...
@@ -938,10 +938,9 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
 	return region;
 }
 
-static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
 			     gpa_t addr, u32 *val)
 {
-	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
 	const struct vgic_register_region *region;
 	struct kvm_vcpu *r_vcpu;
@@ -960,10 +959,9 @@ static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 	return 0;
 }
 
-static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
 			      gpa_t addr, const u32 *val)
 {
-	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
 	const struct vgic_register_region *region;
 	struct kvm_vcpu *r_vcpu;
@@ -986,9 +984,9 @@ int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
 		 bool is_write, int offset, u32 *val)
 {
 	if (is_write)
-		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
+		return vgic_uaccess_write(vcpu, dev, offset, val);
 	else
-		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
+		return vgic_uaccess_read(vcpu, dev, offset, val);
 }
 
 static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
...
@@ -293,6 +293,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
 struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
 							   u32 index);
+void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
 bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
...
@@ -322,6 +322,7 @@ struct vgic_cpu {
 	 */
 	struct vgic_io_device	rd_iodev;
 	struct vgic_redist_region *rdreg;
+	u32 rdreg_index;
 
 	/* Contains the attributes and gpa of the LPI pending tables. */
 	u64 pendbaser;
...
 # SPDX-License-Identifier: GPL-2.0-only
 /aarch64/get-reg-list
 /aarch64/get-reg-list-sve
+/aarch64/vgic_init
 /s390x/memop
 /s390x/resets
 /s390x/sync_regs_test
...
@@ -75,6 +75,7 @@ TEST_GEN_PROGS_x86_64 += steal_time
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
+TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
...
This diff is collapsed.
@@ -223,6 +223,15 @@ int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
 #endif
 void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);
 
+int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
+int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
+int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd);
+int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test);
+int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+		       void *val, bool write);
+int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+		      void *val, bool write);
+
 const char *exit_reason_str(unsigned int exit_reason);
 
 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
...
@@ -1728,6 +1728,81 @@ int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
 	return ioctl(vm->kvm_fd, cmd, arg);
 }
 
+/*
+ * Device Ioctl
+ */
+
+int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+	struct kvm_device_attr attribute = {
+		.group = group,
+		.attr = attr,
+		.flags = 0,
+	};
+
+	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
+}
+
+int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+	int ret = _kvm_device_check_attr(dev_fd, group, attr);
+
+	TEST_ASSERT(ret >= 0, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
+	return ret;
+}
+
+int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd)
+{
+	struct kvm_create_device create_dev;
+	int ret;
+
+	create_dev.type = type;
+	create_dev.fd = -1;
+	create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
+	ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev);
+	*fd = create_dev.fd;
+	return ret;
+}
+
+int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test)
+{
+	int fd, ret;
+
+	ret = _kvm_create_device(vm, type, test, &fd);
+
+	if (!test) {
+		TEST_ASSERT(ret >= 0,
+			    "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno);
+		return fd;
+	}
+	return ret;
+}
+
+int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+		      void *val, bool write)
+{
+	struct kvm_device_attr kvmattr = {
+		.group = group,
+		.attr = attr,
+		.flags = 0,
+		.addr = (uintptr_t)val,
+	};
+	int ret;
+
+	ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
+		    &kvmattr);
+	return ret;
+}
+
+int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
+		      void *val, bool write)
+{
+	int ret = _kvm_device_access(dev_fd, group, attr, val, write);
+
+	TEST_ASSERT(ret >= 0, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
+	return ret;
+}
+
 /*
  * VM Dump
  *
...
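The helpers above are what the new aarch64/vgic_init selftest builds on. A hedged usage sketch follows; the distributor base address and the assumption that the VM's vCPUs have already been added are illustrative, not taken from the test itself:

#include "kvm_util.h"

/*
 * Illustrative only: drive the new device-ioctl wrappers against a VM
 * whose vCPUs already exist. The 0x8000000ULL distributor GPA is an
 * arbitrary example value.
 */
static void example_vgic_v3_setup(struct kvm_vm *vm)
{
	uint64_t dist_base = 0x8000000ULL;
	int vgic_fd;

	/* Probe first (test == true): only the return code matters here. */
	if (_kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, true, &vgic_fd))
		return;

	/* Really create the device; asserts on failure and returns the fd. */
	vgic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);

	/* KVM_SET_DEVICE_ATTR: place the distributor ... */
	kvm_device_access(vgic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
			  KVM_VGIC_V3_ADDR_TYPE_DIST, &dist_base, true);

	/* ... and initialize the vGIC once all vCPUs have been created. */
	kvm_device_access(vgic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
			  KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
}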