Commit d455d366 authored by Marc Zyngier, committed by Oliver Upton

KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id

Since our emulated ITS advertises GITS_TYPER.PTA=0, the target
address associated with a collection is a PE number, not an
address. So far, so good. However, the PE number is what userspace
has given us (aka the vcpu_id), and not the internal vcpu
index.

Make sure we consistently retrieve the vcpu by ID rather than
by index, adding a helper that deals with most of the cases.

We also get rid of the pointless (and bogus) comparisons against
online_vcpus: vcpu_ids may be sparse, so a valid PE number is not
bounded by the number of online vcpus.
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230927090911.3355209-3-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 9a0a75d3
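
For context: a vcpu's ID is chosen by userspace when the vcpu is created and is
not guaranteed to match the vcpu's index in the VM's vcpu array, which is why
the lookups below switch from kvm_get_vcpu() (index-based) to
kvm_get_vcpu_by_id() (ID-based). As a reference, the by-ID helper in
include/linux/kvm_host.h looks roughly like this (paraphrased for
illustration; not part of this patch):

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        unsigned long i;

        if (id < 0)
                return NULL;
        /* Fast path: the ID often coincides with the index */
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        /* Slow path: linear scan over all vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}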
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -378,6 +378,12 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
         return ret;
 }
 
+static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
+                                           struct its_collection *col)
+{
+        return kvm_get_vcpu_by_id(kvm, col->target_addr);
+}
+
 /*
  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  * is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -391,7 +397,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
         if (!its_is_collection_mapped(ite->collection))
                 return;
 
-        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+        vcpu = collection_to_vcpu(kvm, ite->collection);
         update_affinity(ite->irq, vcpu);
 }
 
@@ -679,7 +685,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
         if (!ite || !its_is_collection_mapped(ite->collection))
                 return E_ITS_INT_UNMAPPED_INTERRUPT;
 
-        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
+        vcpu = collection_to_vcpu(kvm, ite->collection);
         if (!vcpu)
                 return E_ITS_INT_UNMAPPED_INTERRUPT;
 
@@ -887,7 +893,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
                 return E_ITS_MOVI_UNMAPPED_COLLECTION;
 
         ite->collection = collection;
-        vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+        vcpu = collection_to_vcpu(kvm, collection);
 
         vgic_its_invalidate_cache(kvm);
 
@@ -1121,7 +1127,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
         }
 
         if (its_is_collection_mapped(collection))
-                vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+                vcpu = collection_to_vcpu(kvm, collection);
 
         irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
         if (IS_ERR(irq)) {
@@ -1242,21 +1248,22 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
                                     u64 *its_cmd)
 {
         u16 coll_id;
-        u32 target_addr;
         struct its_collection *collection;
         bool valid;
 
         valid = its_cmd_get_validbit(its_cmd);
         coll_id = its_cmd_get_collection(its_cmd);
-        target_addr = its_cmd_get_target_addr(its_cmd);
-
-        if (target_addr >= atomic_read(&kvm->online_vcpus))
-                return E_ITS_MAPC_PROCNUM_OOR;
 
         if (!valid) {
                 vgic_its_free_collection(its, coll_id);
                 vgic_its_invalidate_cache(kvm);
         } else {
+                struct kvm_vcpu *vcpu;
+
+                vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+                if (!vcpu)
+                        return E_ITS_MAPC_PROCNUM_OOR;
+
                 collection = find_collection(its, coll_id);
 
                 if (!collection) {
@@ -1270,9 +1277,9 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
                                                 coll_id);
                         if (ret)
                                 return ret;
-                        collection->target_addr = target_addr;
+                        collection->target_addr = vcpu->vcpu_id;
                 } else {
-                        collection->target_addr = target_addr;
+                        collection->target_addr = vcpu->vcpu_id;
                         update_affinity_collection(kvm, its, collection);
                 }
         }
@@ -1382,7 +1389,7 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
         if (!its_is_collection_mapped(collection))
                 return E_ITS_INVALL_UNMAPPED_COLLECTION;
 
-        vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+        vcpu = collection_to_vcpu(kvm, collection);
         vgic_its_invall(vcpu);
 
         return 0;
@@ -1399,23 +1406,21 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                       u64 *its_cmd)
 {
-        u32 target1_addr = its_cmd_get_target_addr(its_cmd);
-        u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
         struct kvm_vcpu *vcpu1, *vcpu2;
         struct vgic_irq *irq;
         u32 *intids;
         int irq_count, i;
 
-        if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
-            target2_addr >= atomic_read(&kvm->online_vcpus))
+        /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
+        vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
+        vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));
+        if (!vcpu1 || !vcpu2)
                 return E_ITS_MOVALL_PROCNUM_OOR;
 
-        if (target1_addr == target2_addr)
+        if (vcpu1 == vcpu2)
                 return 0;
 
-        vcpu1 = kvm_get_vcpu(kvm, target1_addr);
-        vcpu2 = kvm_get_vcpu(kvm, target2_addr);
-
         irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
         if (irq_count < 0)
                 return irq_count;
@@ -2258,7 +2263,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                 return PTR_ERR(ite);
 
         if (its_is_collection_mapped(collection))
-                vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+                vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);
 
         irq = vgic_add_lpi(kvm, lpi_id, vcpu);
         if (IS_ERR(irq)) {
@@ -2573,7 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
         coll_id = val & KVM_ITS_CTE_ICID_MASK;
 
         if (target_addr != COLLECTION_NOT_MAPPED &&
-            target_addr >= atomic_read(&kvm->online_vcpus))
+            !kvm_get_vcpu_by_id(kvm, target_addr))
                 return -EINVAL;
 
         collection = find_collection(its, coll_id);
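
To see why the old online_vcpus bound was bogus, consider a hypothetical guest
whose userspace created two vcpus with the sparse IDs 0 and 4: online_vcpus is
2, yet vcpu_id 4 names a perfectly valid redistributor. A minimal sketch of
the old check versus the new one in the MAPC handler above (values are
illustrative, not from the patch):

        u32 target_addr = 4;    /* PE number taken from the MAPC command */

        /* Old check: wrongly rejects a valid target, since 4 >= 2 */
        if (target_addr >= atomic_read(&kvm->online_vcpus))
                return E_ITS_MAPC_PROCNUM_OOR;

        /* New check: resolves the ID directly and finds the vcpu */
        vcpu = kvm_get_vcpu_by_id(kvm, target_addr);
        if (!vcpu)
                return E_ITS_MAPC_PROCNUM_OOR;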