Commit a9dd6f09 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Allocate vcpu struct in common x86 code

Move allocation of VMX and SVM vcpus to common x86.  Although the struct
being allocated is technically a VMX/SVM struct, it can be interpreted
directly as a 'struct kvm_vcpu' because of the pre-existing requirement
that 'struct kvm_vcpu' be located at offset zero of the arch/vendor vcpu
struct.
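
This offset-zero aliasing is easiest to see in a minimal sketch (field lists
abbreviated; to_svm() is the real SVM helper and container_of() the standard
kernel macro, but the struct body here is simplified for illustration):

	struct vcpu_svm {
		struct kvm_vcpu vcpu;	/* must stay at offset 0 */
		struct vmcb *vmcb;
		/* ... further SVM-specific state ... */
	};

	static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	{
		/* with the member at offset 0, this is effectively a cast */
		return container_of(vcpu, struct vcpu_svm, vcpu);
	}

Because 'struct kvm_vcpu' sits at offset zero, the pointer returned by the
allocator for a 'struct vcpu_svm' and the 'struct kvm_vcpu *' that common
code passes around refer to the same address, which is what lets common x86
own the allocation while vendor code recovers its wrapper via to_svm() or
to_vmx().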

Remove the message from the build-time assertions regarding placement of
the struct: compatibility with the arch usercopy region is no longer the
only thing that depends on 'struct kvm_vcpu' being at offset zero.
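
For reference, the two assertion forms differ only in the diagnostic emitted
when the build breaks; a minimal illustration (not part of the diff below):

	/* generic compile-time failure message */
	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);

	/* caller-supplied message, as used before this patch */
	BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
		"struct kvm_vcpu must be at offset 0 for arch usercopy region");

Both fail the build if the expression is true, so dropping the message loses
no checking, only the usercopy-specific wording.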
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7f27179a
...
@@ -1050,7 +1050,7 @@ struct kvm_x86_ops {
 	void (*vm_destroy)(struct kvm *kvm);
 
 	/* Create, but do not attach this VCPU */
-	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+	int (*vcpu_create)(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
...
...
@@ -2187,9 +2187,9 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
 	return ret;
 }
 
-static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			   unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
 	struct vcpu_svm *svm;
 	struct page *page;
 	struct page *msrpm_pages;
@@ -2197,22 +2197,15 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	struct page *nested_msrpm_pages;
 	int err;
 
-	BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
-		"struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
-	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-	if (!svm) {
-		err = -ENOMEM;
-		goto out;
-	}
-	vcpu = &svm->vcpu;
+	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
+	svm = to_svm(vcpu);
 
 	vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
 						GFP_KERNEL_ACCOUNT);
 	if (!vcpu->arch.user_fpu) {
 		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
 		err = -ENOMEM;
-		goto free_partial_svm;
+		goto out;
 	}
 
 	vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -2225,7 +2218,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
-		goto free_svm;
+		goto free_guest_fpu;
 
 	err = -ENOMEM;
 	page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -2269,7 +2262,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	svm_init_osvw(vcpu);
 
-	return vcpu;
+	return 0;
 
 free_page4:
 	__free_page(hsave_page);
@@ -2281,14 +2274,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	__free_page(page);
 uninit:
 	kvm_vcpu_uninit(vcpu);
-free_svm:
+free_guest_fpu:
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
 free_user_fpu:
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-free_partial_svm:
-	kmem_cache_free(kvm_vcpu_cache, svm);
 out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void svm_clear_current_vmcb(struct vmcb *vmcb)
...
@@ -2317,7 +2308,6 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
-	kmem_cache_free(kvm_vcpu_cache, svm);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
...
...
@@ -6684,31 +6684,24 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
-	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
 
-static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			   unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
 	struct vcpu_vmx *vmx;
 	unsigned long *msr_bitmap;
 	int i, cpu, err;
 
-	BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
-		"struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
-	vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-	if (!vmx)
-		return ERR_PTR(-ENOMEM);
-
-	vcpu = &vmx->vcpu;
+	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
+	vmx = to_vmx(vcpu);
 
 	vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
 						GFP_KERNEL_ACCOUNT);
 	if (!vcpu->arch.user_fpu) {
 		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
 		err = -ENOMEM;
-		goto free_partial_vcpu;
+		goto out;
 	}
 
 	vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -6829,7 +6822,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->ept_pointer = INVALID_PAGE;
 
-	return vcpu;
+	return 0;
 
 free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
@@ -6842,9 +6835,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
 free_user_fpu:
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-free_partial_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vmx);
-	return ERR_PTR(err);
+out:
+	return err;
 }
 
 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
...
...
@@ -9172,26 +9172,34 @@ static void fx_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
-	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
-
 	kvmclock_reset(vcpu);
 
 	kvm_x86_ops->vcpu_free(vcpu);
-	free_cpumask_var(wbinvd_dirty_mask);
+
+	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 				      unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
+	int r;
 
 	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
 		printk_once(KERN_WARNING
 			    "kvm: SMP vm created on host with unstable TSC; "
 			    "guest TSC will not be reliable\n");
-	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
+	if (!vcpu)
+		return ERR_PTR(-ENOMEM);
+
+	r = kvm_x86_ops->vcpu_create(kvm, vcpu, id);
+	if (r) {
+		kmem_cache_free(kvm_vcpu_cache, vcpu);
+		return ERR_PTR(r);
+	}
 
 	return vcpu;
 }
...
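
Note that the external contract of kvm_arch_vcpu_create() is unchanged: it
still returns either a valid vcpu pointer or an ERR_PTR-encoded errno, so the
generic caller in kvm_vm_ioctl_create_vcpu() keeps its usual pattern (a
condensed sketch; the surrounding locking and error unwinding are omitted):

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu)) {
		r = PTR_ERR(vcpu);
		goto out;	/* actual unwind label elided for illustration */
	}

Only the split of responsibilities moves: common x86 now allocates and frees
the backing object from kvm_vcpu_cache, while the vendor vcpu_create and
vcpu_free callbacks initialize and tear down the state inside it.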