Commit 8db29ea2 authored by Greg Kurz's avatar Greg Kurz Committed by Paul Mackerras

KVM: PPC: Book3S HV: XIVE: Compute the VP id in a common helper

Reduce code duplication by consolidating the checking of vCPU ids and VP
ids to a common helper used by both legacy and native XIVE KVM devices.
And explain the magic with a comment.
Signed-off-by: default avatarGreg Kurz <groug@kaod.org>
Signed-off-by: default avatarPaul Mackerras <paulus@ozlabs.org>
parent 8a4e7597
...@@ -1211,6 +1211,37 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) ...@@ -1211,6 +1211,37 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.xive_vcpu = NULL; vcpu->arch.xive_vcpu = NULL;
} }
static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/*
	 * We have a block of KVM_MAX_VCPUS VPs. We just need to check
	 * that raw vCPU ids are below the expected limit for this
	 * guest's core stride; kvmppc_pack_vcpu_id() will pack them
	 * down to an index that can be safely used to compute a VP id
	 * that belongs to the VP block.
	 */
	u32 id_limit = KVM_MAX_VCPUS * xive->kvm->arch.emul_smt_mode;

	return cpu < id_limit;
}
/*
 * Validate a raw vCPU id and translate it to a VP id.
 *
 * Returns 0 and stores the VP id in @vp on success, -EINVAL if the
 * vCPU id is out of range for this guest, or -EEXIST if the resulting
 * VP id is already in use.
 */
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
{
	u32 id;

	/* Reject vCPU ids beyond what this guest's core stride allows. */
	if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}

	/* The id is valid, so packing it into the VP block is safe. */
	id = kvmppc_xive_vp(xive, cpu);

	/* A VP id must not be claimed by two vCPUs at once. */
	if (kvmppc_xive_vp_in_use(xive->kvm, id)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}

	*vp = id;

	return 0;
}
int kvmppc_xive_connect_vcpu(struct kvm_device *dev, int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) struct kvm_vcpu *vcpu, u32 cpu)
{ {
...@@ -1229,20 +1260,13 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, ...@@ -1229,20 +1260,13 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
return -EPERM; return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY; return -EBUSY;
if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
/* We need to synchronize with queue provisioning */ /* We need to synchronize with queue provisioning */
mutex_lock(&xive->lock); mutex_lock(&xive->lock);
vp_id = kvmppc_xive_vp(xive, cpu); r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { if (r)
pr_devel("Duplicate !\n");
r = -EEXIST;
goto bail; goto bail;
}
xc = kzalloc(sizeof(*xc), GFP_KERNEL); xc = kzalloc(sizeof(*xc), GFP_KERNEL);
if (!xc) { if (!xc) {
......
...@@ -296,6 +296,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, ...@@ -296,6 +296,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type); struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
struct kvmppc_xive_vcpu *xc, int irq); struct kvmppc_xive_vcpu *xc, int irq);
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
#endif /* CONFIG_KVM_XICS */ #endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */ #endif /* _KVM_PPC_BOOK3S_XICS_H */
...@@ -118,19 +118,12 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, ...@@ -118,19 +118,12 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
return -EPERM; return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY; return -EBUSY;
if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
mutex_lock(&xive->lock); mutex_lock(&xive->lock);
vp_id = kvmppc_xive_vp(xive, server_num); rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { if (rc)
pr_devel("Duplicate !\n");
rc = -EEXIST;
goto bail; goto bail;
}
xc = kzalloc(sizeof(*xc), GFP_KERNEL); xc = kzalloc(sizeof(*xc), GFP_KERNEL);
if (!xc) { if (!xc) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment