Commit 0c2c7c06 authored by Peter Gonda, committed by Paolo Bonzini

KVM: SEV: Mark nested locking of vcpu->lock

svm_vm_migrate_from() uses sev_lock_vcpus_for_migration() to lock all
source and target vcpu->locks. Unfortunately, lockdep has an 8-subclass
limit (MAX_LOCKDEP_SUBCLASSES), so a new subclass cannot be used for
each vCPU. Instead, maintain lockdep ownership of only the first vcpu's
mutex.dep_map, using a role-specific subclass: source vs. target.
Release the other vcpus' mutex.dep_maps.

Fixes: b5663931 ("KVM: SEV: Add support for SEV intra host migration")
Reported-by: John Sperbeck <jsperbeck@google.com>
Suggested-by: David Rientjes <rientjes@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Peter Gonda <pgonda@google.com>

Message-Id: <20220502165807.529624-1-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 04144108
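
Editor's note: before the diff, a minimal sketch of the lockdep pattern the
patch applies, distilled out of the SEV code. The helpers lock_all() and
unlock_all() and the choice of MAX_LOCKDEP_SUBCLASSES - 1 as the
post-first subclass are illustrative assumptions, not part of the patch;
the real code uses the SEV_MIGRATION_* roles shown below. The idea is that
lockdep keeps tracking only the first mutex of the set, so the dependency
chain stays bounded no matter how many mutexes of the same class are held:

#include <linux/mutex.h>
#include <linux/lockdep.h>

/* Hypothetical helper: take N mutexes of the same lock class. */
static int lock_all(struct mutex **locks, int n, unsigned int subclass)
{
	int i;

	for (i = 0; i < n; i++) {
		if (mutex_lock_killable_nested(locks[i], subclass))
			goto out_unlock;

		if (i == 0) {
			/*
			 * Keep lockdep ownership of the first mutex only.
			 * Later acquisitions switch to a different subclass
			 * so they do not collide with it (illustrative
			 * choice; the patch uses SEV_NR_MIGRATION_ROLES).
			 */
			subclass = MAX_LOCKDEP_SUBCLASSES - 1;
		} else {
			/* Drop lockdep bookkeeping; the mutex stays held. */
			mutex_release(&locks[i]->dep_map, _THIS_IP_);
		}
	}
	return 0;

out_unlock:
	while (i--) {
		/*
		 * Re-acquire the dep_map (lockdep only) before the real
		 * unlock, so mutex_unlock()'s internal release balances.
		 */
		if (i != 0)
			mutex_acquire(&locks[i]->dep_map, subclass, 0,
				      _THIS_IP_);
		mutex_unlock(locks[i]);
	}
	return -EINTR;
}

/* Hypothetical helper: release all N mutexes taken by lock_all(). */
static void unlock_all(struct mutex **locks, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i != 0)
			mutex_acquire(&locks[i]->dep_map,
				      MAX_LOCKDEP_SUBCLASSES - 1, 0,
				      _THIS_IP_);
		mutex_unlock(locks[i]);
	}
}

The invariant is that lockdep's acquire/release bookkeeping must balance
around every mutex_unlock(): any mutex whose dep_map was released early has
to be re-acquired, for lockdep's benefit only, just before the real unlock.
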
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 	atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
-static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+/* vCPU mutex subclasses. */
+enum sev_migration_role {
+	SEV_MIGRATION_SOURCE = 0,
+	SEV_MIGRATION_TARGET,
+	SEV_NR_MIGRATION_ROLES,
+};
+
+static int sev_lock_vcpus_for_migration(struct kvm *kvm,
+					enum sev_migration_role role)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i, j;
+	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (mutex_lock_killable(&vcpu->mutex))
+		if (mutex_lock_killable_nested(&vcpu->mutex, role))
 			goto out_unlock;
+
+		if (first) {
+			/*
+			 * Reset the role to one that avoids colliding with
+			 * the role used for the first vcpu mutex.
+			 */
+			role = SEV_NR_MIGRATION_ROLES;
+			first = false;
+		} else {
+			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+		}
 	}
 
 	return 0;
 
 out_unlock:
+
+	first = true;
 	kvm_for_each_vcpu(j, vcpu, kvm) {
 		if (i == j)
 			break;
+
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+
 		mutex_unlock(&vcpu->mutex);
 	}
 	return -EINTR;
@@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
+	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map,
+				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
+
 		mutex_unlock(&vcpu->mutex);
 	}
 }
@@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 		charged = true;
 	}
 
-	ret = sev_lock_vcpus_for_migration(kvm);
+	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
 	if (ret)
 		goto out_dst_cgroup;
-	ret = sev_lock_vcpus_for_migration(source_kvm);
+	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
 	if (ret)
 		goto out_dst_vcpu;