Commit b5663931 authored by Peter Gonda, committed by Paolo Bonzini

KVM: SEV: Add support for SEV intra host migration

For SEV to work with intra host migration, contents of the SEV info struct
such as the ASID (used to index the encryption key in the AMD SP) and
the list of memory regions need to be transferred to the target VM.
This change adds a command for a target VMM to get a source SEV VM's SEV
info.
Signed-off-by: Peter Gonda <pgonda@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Marc Orr <marcorr@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Message-Id: <20211021174303.385706-3-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 91b692a0
@@ -6911,6 +6911,20 @@ MAP_SHARED mmap will result in an -EINVAL return.
When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
perform a bulk copy of tags to/from the guest.
7.29 KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM
-------------------------------------

Architectures: x86 SEV enabled
Type: vm
Parameters: args[0] is the fd of the source vm
Returns: 0 on success

This capability enables userspace to migrate the encryption context from the VM
indicated by the fd to the VM this is called on.

This is intended to support intra-host migration of VMs between userspace VMMs,
upgrading the VMM process without interrupting the guest.

8. Other capabilities.
======================
...
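For illustration only (not part of the patch), a minimal userspace sketch of
enabling the new capability on a target VM. Both fds are assumed to come from
KVM_CREATE_VM, and the source is assumed to already be a SEV (non-ES) guest:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hand the source VM's SEV encryption context to the target VM.
 * Returns 0 on success; afterwards the source VM is dead and can
 * only be torn down.
 */
static int move_enc_context(int target_vm_fd, int source_vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
		.args[0] = source_vm_fd,
	};

	return ioctl(target_vm_fd, KVM_ENABLE_CAP, &cap);
}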
@@ -1476,6 +1476,7 @@ struct kvm_x86_ops {
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);
...
@@ -1532,6 +1532,158 @@ static bool cmd_allowed_from_miror(u32 cmd_id)
	return false;
}
static int sev_lock_for_migration(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	/*
	 * Bail if this VM is already involved in a migration to avoid deadlock
	 * between two VMs trying to migrate to/from each other.
	 */
	if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
		return -EBUSY;

	mutex_lock(&kvm->lock);

	return 0;
}

static void sev_unlock_after_migration(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	mutex_unlock(&kvm->lock);
	atomic_set_release(&sev->migration_in_progress, 0);
}

static int sev_lock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i, j;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable(&vcpu->mutex))
			goto out_unlock;
	}

	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;

		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}

static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_unlock(&vcpu->mutex);
	}
}

static void sev_migrate_from(struct kvm_sev_info *dst,
			     struct kvm_sev_info *src)
{
	dst->active = true;
	dst->asid = src->asid;
	dst->handle = src->handle;
	dst->pages_locked = src->pages_locked;

	src->asid = 0;
	src->active = false;
	src->handle = 0;
	src->pages_locked = 0;

	if (dst->misc_cg != src->misc_cg)
		sev_misc_cg_uncharge(src);

	put_misc_cg(src->misc_cg);
	src->misc_cg = NULL;

	INIT_LIST_HEAD(&dst->regions_list);
	list_replace_init(&src->regions_list, &dst->regions_list);
}

int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_info *src_sev;
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	int ret;

	ret = sev_lock_for_migration(kvm);
	if (ret)
		return ret;

	if (sev_guest(kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto out_fput;
	}

	source_kvm = source_kvm_file->private_data;
	ret = sev_lock_for_migration(source_kvm);
	if (ret)
		goto out_fput;

	if (!sev_guest(source_kvm) || sev_es_guest(source_kvm)) {
		ret = -EINVAL;
		goto out_source;
	}

	src_sev = &to_kvm_svm(source_kvm)->sev_info;
	dst_sev->misc_cg = get_current_misc_cg();
	if (dst_sev->misc_cg != src_sev->misc_cg) {
		ret = sev_misc_cg_try_charge(dst_sev);
		if (ret)
			goto out_dst_put_cgroup;
	}

	ret = sev_lock_vcpus_for_migration(kvm);
	if (ret)
		goto out_dst_cgroup;
	ret = sev_lock_vcpus_for_migration(source_kvm);
	if (ret)
		goto out_dst_vcpu;

	sev_migrate_from(dst_sev, src_sev);
	kvm_vm_dead(source_kvm);
	ret = 0;

	sev_unlock_vcpus_for_migration(source_kvm);
out_dst_vcpu:
	sev_unlock_vcpus_for_migration(kvm);
out_dst_cgroup:
	if (ret < 0) {
		sev_misc_cg_uncharge(dst_sev);
out_dst_put_cgroup:
		put_misc_cg(dst_sev->misc_cg);
		dst_sev->misc_cg = NULL;
	}
out_source:
	sev_unlock_after_migration(source_kvm);
out_fput:
	if (source_kvm_file)
		fput(source_kvm_file);
out_unlock:
	sev_unlock_after_migration(kvm);
	return ret;
}
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
...
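As a hedged illustration of the scenario the migration_in_progress flag above
guards against (two VMs migrating to/from each other concurrently), whichever
caller loses the cmpxchg race is expected to get -EBUSY rather than deadlock
on the two kvm->lock mutexes. vm_a_fd and vm_b_fd are hypothetical VM fds set
up elsewhere; move_enc_context() is the same wrapper as in the documentation
example, repeated so the sketch stands alone:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vm_a_fd = -1, vm_b_fd = -1;	/* hypothetical: fds from KVM_CREATE_VM */

static int move_enc_context(int target_vm_fd, int source_vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
		.args[0] = source_vm_fd,
	};

	return ioctl(target_vm_fd, KVM_ENABLE_CAP, &cap);
}

static void *migrate_b_into_a(void *arg)
{
	/* Races with the opposite-direction call in main(). */
	if (move_enc_context(vm_a_fd, vm_b_fd) < 0 && errno == EBUSY)
		puts("a<-b lost the race: -EBUSY, not a deadlock");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, migrate_b_into_a, NULL);
	if (move_enc_context(vm_b_fd, vm_a_fd) < 0 && errno == EBUSY)
		puts("b<-a lost the race: -EBUSY, not a deadlock");
	pthread_join(t, NULL);
	return 0;
}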
@@ -4699,6 +4699,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
	.mem_enc_unreg_region = svm_unregister_enc_region,

	.vm_copy_enc_context_from = svm_vm_copy_asid_from,
	.vm_move_enc_context_from = svm_vm_migrate_from,

	.can_emulate_instruction = svm_can_emulate_instruction,
...
@@ -80,6 +80,7 @@ struct kvm_sev_info {
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
@@ -562,6 +563,7 @@ int svm_register_enc_region(struct kvm *kvm,
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
...
@@ -5845,6 +5845,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
		if (kvm_x86_ops.vm_copy_enc_context_from)
			r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
		return r;
	case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
		r = -EINVAL;
		if (kvm_x86_ops.vm_move_enc_context_from)
			r = kvm_x86_ops.vm_move_enc_context_from(
				kvm, cap->args[0]);
		return r;
	case KVM_CAP_EXIT_HYPERCALL:
		if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
			r = -EINVAL;
...
@@ -1130,6 +1130,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206

#ifdef KVM_CAP_IRQ_ROUTING
...
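Putting the pieces together, a hedged sketch of the intended VMM-upgrade flow
from the new (target) process's side. recv_vm_fd_from_old_vmm() is a
hypothetical stand-in for an fd transfer from the old VMM, e.g. via SCM_RIGHTS
over a unix socket:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical: receives the source VM fd from the old VMM process. */
extern int recv_vm_fd_from_old_vmm(void);

int take_over_sev_guest(void)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
	};
	int kvm_fd, target_vm_fd;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0)
		return -1;

	/*
	 * The target must be a fresh, non-SEV VM: svm_vm_migrate_from()
	 * rejects a destination that already has SEV enabled.
	 */
	target_vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (target_vm_fd < 0)
		return -1;

	cap.args[0] = recv_vm_fd_from_old_vmm();

	/*
	 * On success the ASID, SEV handle, locked-page count and regions
	 * list belong to the target, and KVM marks the source VM dead,
	 * so the old VMM can do nothing with it except tear it down.
	 */
	if (ioctl(target_vm_fd, KVM_ENABLE_CAP, &cap))
		return -1;

	return target_vm_fd;
}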