Commit c90aad55 authored by Marc Zyngier

Merge branch 'kvm-arm64/vgic-5.13' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents d8f37d29 94ac0835
@@ -228,7 +228,7 @@ Groups:
KVM_DEV_ARM_VGIC_CTRL_INIT
request the initialization of the VGIC, no additional parameter in
kvm_device_attr.addr.
kvm_device_attr.addr. Must be called after all VCPUs have been created.
KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES
save all LPI pending bits into guest RAM pending tables.
......
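For orientation, a minimal userspace sketch of the call the updated documentation describes: the VGIC is initialized by writing the KVM_DEV_ARM_VGIC_CTRL_INIT attribute in the KVM_DEV_ARM_VGIC_GRP_CTRL group on the GIC device fd. This is not part of the diff; the name vgic_fd is an illustrative assumption.

/*
 * Sketch only: request VGIC initialization from userspace.
 * vgic_fd is assumed to be the fd returned by KVM_CREATE_DEVICE
 * for a KVM_DEV_TYPE_ARM_VGIC_V3 device.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vgic_init(int vgic_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
		/* no payload: kvm_device_attr.addr is left at 0 */
	};

	/* Per the documentation change above, only valid once every VCPU has been created. */
	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}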
@@ -335,13 +335,14 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
kfree(dist->spis);
dist->spis = NULL;
dist->nr_spis = 0;
dist->vgic_dist_base = VGIC_ADDR_UNDEF;
if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
list_del(&rdreg->list);
kfree(rdreg);
}
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
vgic_v3_free_redist_region(rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
}
if (vgic_has_its(kvm))
@@ -362,6 +363,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
/* To be called with kvm->lock held */
......
@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
goto out;
}
rdreg = list_first_entry(&vgic->rd_regions,
struct vgic_redist_region, list);
rdreg = list_first_entry_or_null(&vgic->rd_regions,
struct vgic_redist_region, list);
if (!rdreg)
addr_ptr = &undef_value;
else
@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
u64 addr;
unsigned long type = (unsigned long)attr->attr;
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
r = kvm_vgic_addr(dev->kvm, type, &addr, false);
if (r)
return (r == -ENODEV) ? -ENXIO : r;
......
@@ -251,30 +251,35 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
vgic_enable_lpis(vcpu);
}
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
int target_vcpu_id = vcpu->vcpu_id;
gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
(rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
u64 value;
struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;
value = (u64)(mpidr & GENMASK(23, 0)) << 32;
value |= ((target_vcpu_id & 0xffff) << 8);
if (!rdreg)
return false;
if (addr == last_rdist_typer)
value |= GICR_TYPER_LAST;
if (vgic_has_its(vcpu->kvm))
value |= GICR_TYPER_PLPIS;
if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
return false;
} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
struct list_head *rd_regions = &vgic->rd_regions;
gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
return extract_bytes(value, addr & 7, len);
/*
* this rdist is the last one of its redist region;
* check that no other populated redist region starts right after it
*/
list_for_each_entry(iter, rd_regions, list) {
if (iter->base == end && iter->free_index > 0)
return false;
}
}
return true;
}
static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
int target_vcpu_id = vcpu->vcpu_id;
@@ -286,7 +291,9 @@ static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
if (vgic_has_its(vcpu->kvm))
value |= GICR_TYPER_PLPIS;
/* reporting of the Last bit is not supported for userspace */
if (vgic_mmio_vcpu_rdist_is_last(vcpu))
value |= GICR_TYPER_LAST;
return extract_bytes(value, addr & 7, len);
}
@@ -612,7 +619,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
NULL, vgic_mmio_uaccess_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
@@ -714,6 +721,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
return -EINVAL;
vgic_cpu->rdreg = rdreg;
vgic_cpu->rdreg_index = rdreg->free_index;
rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
@@ -768,7 +776,7 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
}
/**
* vgic_v3_insert_redist_region - Insert a new redistributor region
* vgic_v3_alloc_redist_region - Allocate a new redistributor region
*
* Performs various checks before inserting the rdist region in the list.
* Those tests depend on whether the size of the rdist region is known
@@ -782,8 +790,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
*
* Return 0 on success, < 0 otherwise
*/
static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
gpa_t base, uint32_t count)
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
gpa_t base, uint32_t count)
{
struct vgic_dist *d = &kvm->arch.vgic;
struct vgic_redist_region *rdreg;
@@ -791,10 +799,6 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
int ret;
/* single rdist region already set ?*/
if (!count && !list_empty(rd_regions))
return -EINVAL;
/* cross the end of memory ? */
if (base + size < base)
return -EINVAL;
@@ -805,11 +809,15 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
} else {
rdreg = list_last_entry(rd_regions,
struct vgic_redist_region, list);
if (index != rdreg->index + 1)
/* Don't mix single region and discrete redist regions */
if (!count && rdreg->count)
return -EINVAL;
/* Cannot add an explicitly sized region after a legacy region */
if (!rdreg->count)
if (!count)
return -EEXIST;
if (index != rdreg->index + 1)
return -EINVAL;
}
@@ -848,11 +856,17 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
return ret;
}
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
{
list_del(&rdreg->list);
kfree(rdreg);
}
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
int ret;
ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
if (ret)
return ret;
@@ -861,8 +875,13 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
* afterwards will register the iodevs when needed.
*/
ret = vgic_register_all_redist_iodevs(kvm);
if (ret)
if (ret) {
struct vgic_redist_region *rdreg;
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
vgic_v3_free_redist_region(rdreg);
return ret;
}
return 0;
}
......
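The redistributor region allocation above is driven from userspace through the KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attribute. A hypothetical sketch follows, assuming the attribute layout documented in Documentation/virt/kvm/devices/arm-vgic-v3.rst (count in bits [63:52], guest base address bits [51:16], flags in [15:12], index in [11:0]); vgic_fd, base, count and index are illustrative names, not part of this diff.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: register one redistributor region able to hold `count` redistributors. */
static int set_redist_region(int vgic_fd, uint64_t base, uint64_t count,
			     uint64_t index)
{
	/* Assumed layout: count[63:52] | base[51:16] | flags[15:12] | index[11:0] */
	uint64_t val = ((count & 0xfff) << 52) |
		       (base & 0x000fffffffff0000ULL) |
		       (index & 0xfff);
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
		.addr  = (uint64_t)(uintptr_t)&val,
	};

	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}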
@@ -938,10 +938,9 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
return region;
}
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
gpa_t addr, u32 *val)
{
struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
const struct vgic_register_region *region;
struct kvm_vcpu *r_vcpu;
@@ -960,10 +959,9 @@ static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
return 0;
}
static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
gpa_t addr, const u32 *val)
{
struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
const struct vgic_register_region *region;
struct kvm_vcpu *r_vcpu;
@@ -986,9 +984,9 @@ int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
bool is_write, int offset, u32 *val)
{
if (is_write)
return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
return vgic_uaccess_write(vcpu, dev, offset, val);
else
return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
return vgic_uaccess_read(vcpu, dev, offset, val);
}
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
......
@@ -293,6 +293,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
......
@@ -322,6 +322,7 @@ struct vgic_cpu {
*/
struct vgic_io_device rd_iodev;
struct vgic_redist_region *rdreg;
u32 rdreg_index;
/* Contains the attributes and gpa of the LPI pending tables. */
u64 pendbaser;
......
# SPDX-License-Identifier: GPL-2.0-only
/aarch64/get-reg-list
/aarch64/get-reg-list-sve
/aarch64/vgic_init
/s390x/memop
/s390x/resets
/s390x/sync_regs_test
......
@@ -75,6 +75,7 @@ TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
......
@@ -223,6 +223,15 @@ int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
#endif
void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);
int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd);
int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test);
int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write);
int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write);
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
......
@@ -1728,6 +1728,81 @@ int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
return ioctl(vm->kvm_fd, cmd, arg);
}
/*
* Device Ioctl
*/
int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
struct kvm_device_attr attribute = {
.group = group,
.attr = attr,
.flags = 0,
};
return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}
int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
int ret = _kvm_device_check_attr(dev_fd, group, attr);
TEST_ASSERT(ret >= 0, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
return ret;
}
int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd)
{
struct kvm_create_device create_dev;
int ret;
create_dev.type = type;
create_dev.fd = -1;
create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev);
*fd = create_dev.fd;
return ret;
}
int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test)
{
int fd, ret;
ret = _kvm_create_device(vm, type, test, &fd);
if (!test) {
TEST_ASSERT(ret >= 0,
"KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno);
return fd;
}
return ret;
}
int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write)
{
struct kvm_device_attr kvmattr = {
.group = group,
.attr = attr,
.flags = 0,
.addr = (uintptr_t)val,
};
int ret;
ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
&kvmattr);
return ret;
}
int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write)
{
int ret = _kvm_device_access(dev_fd, group, attr, val, write);
TEST_ASSERT(ret >= 0, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
return ret;
}
/*
* VM Dump
*
......
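The device ioctl helpers added above are what the new aarch64/vgic_init test builds on. A short usage sketch, assuming a struct kvm_vm whose VCPUs have already been added; names beyond the helpers shown in this diff are illustrative.

#include <stddef.h>
#include <linux/kvm.h>
#include "kvm_util.h"	/* declares kvm_create_device() and kvm_device_access() */

static void create_and_init_vgic_v3(struct kvm_vm *vm)
{
	/* test == false: assert on failure and return the device fd */
	int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);

	/* KVM_DEV_ARM_VGIC_CTRL_INIT takes no payload, so val is NULL */
	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
			  KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
}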