Commit e6aa9abd authored by Joerg Roedel, committed by Avi Kivity

KVM: SVM: move nested svm state into separate struct

This makes it clearer what purpose these members of struct vcpu_svm serve.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent a5c3832d
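
For orientation, the patch gathers the nested-SVM bookkeeping of struct vcpu_svm into a dedicated struct. A condensed view of the result, reconstructed from the hunks below (not part of the patch itself; unrelated vcpu_svm members are omitted):

struct nested_state {
        struct vmcb *hsave;     /* host state saved across a nested VMRUN,
                                   restored on #VMEXIT */
        u64 hsave_msr;          /* value of the MSR_VM_HSAVE_PA MSR */
        u64 vmcb;               /* gpa of the nested vmcb; non-zero means
                                   nested SVM is active (see is_nested()) */

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        /* ... */
        struct nested_state nested;     /* replaces the former nested_* members */
};

Call sites change mechanically: svm->nested_vmcb becomes svm->nested.vmcb, svm->nested_msrpm becomes svm->nested.msrpm, and so on, as the hunks below show.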
arch/x86/kvm/svm.c

@@ -70,6 +70,18 @@ static const u32 host_save_user_msrs[] = {
 
 struct kvm_vcpu;
 
+struct nested_state {
+        struct vmcb *hsave;
+        u64 hsave_msr;
+        u64 vmcb;
+
+        /* These are the merged vectors */
+        u32 *msrpm;
+
+        /* gpa pointers to the real vectors */
+        u64 vmcb_msrpm;
+};
+
 struct vcpu_svm {
         struct kvm_vcpu vcpu;
         struct vmcb *vmcb;
@@ -85,16 +97,8 @@ struct vcpu_svm {
         u64 host_gs_base;
 
         u32 *msrpm;
-        struct vmcb *hsave;
-        u64 hsave_msr;
-
-        u64 nested_vmcb;
-
-        /* These are the merged vectors */
-        u32 *nested_msrpm;
-
-        /* gpa pointers to the real vectors */
-        u64 nested_vmcb_msrpm;
+
+        struct nested_state nested;
 };
 
 /* enable NPT for AMD64 and X86 with PAE */
@@ -127,7 +131,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 
 static inline bool is_nested(struct vcpu_svm *svm)
 {
-        return svm->nested_vmcb;
+        return svm->nested.vmcb;
 }
 
 static inline void enable_gif(struct vcpu_svm *svm)
@@ -636,7 +640,7 @@ static void init_vmcb(struct vcpu_svm *svm)
         }
 
         force_new_asid(&svm->vcpu);
 
-        svm->nested_vmcb = 0;
+        svm->nested.vmcb = 0;
         svm->vcpu.arch.hflags = 0;
         enable_gif(svm);
@@ -699,9 +703,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
         hsave_page = alloc_page(GFP_KERNEL);
         if (!hsave_page)
                 goto uninit;
-        svm->hsave = page_address(hsave_page);
+        svm->nested.hsave = page_address(hsave_page);
 
-        svm->nested_msrpm = page_address(nested_msrpm_pages);
+        svm->nested.msrpm = page_address(nested_msrpm_pages);
 
         svm->vmcb = page_address(page);
         clear_page(svm->vmcb);
@@ -731,8 +735,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
         __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
-        __free_page(virt_to_page(svm->hsave));
-        __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
+        __free_page(virt_to_page(svm->nested.hsave));
+        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
         kvm_vcpu_uninit(vcpu);
         kmem_cache_free(kvm_vcpu_cache, svm);
 }
@@ -1558,13 +1562,13 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
 
         switch (svm->vmcb->control.exit_code) {
         case SVM_EXIT_MSR:
-                return nested_svm_do(svm, svm->nested_vmcb,
-                                     svm->nested_vmcb_msrpm, NULL,
+                return nested_svm_do(svm, svm->nested.vmcb,
+                                     svm->nested.vmcb_msrpm, NULL,
                                      nested_svm_exit_handled_msr);
         default: break;
         }
 
-        return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
+        return nested_svm_do(svm, svm->nested.vmcb, 0, &k,
                              nested_svm_exit_handled_real);
 }
 
@@ -1604,7 +1608,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
                                   void *arg2, void *opaque)
 {
         struct vmcb *nested_vmcb = (struct vmcb *)arg1;
-        struct vmcb *hsave = svm->hsave;
+        struct vmcb *hsave = svm->nested.hsave;
         struct vmcb *vmcb = svm->vmcb;
 
         /* Give the current vmcb to the guest */
@@ -1679,7 +1683,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
         svm->vmcb->control.exit_int_info = 0;
 
         /* Exit nested SVM mode */
-        svm->nested_vmcb = 0;
+        svm->nested.vmcb = 0;
 
         return 0;
 }
@@ -1687,7 +1691,7 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
 
 static int nested_svm_vmexit(struct vcpu_svm *svm)
 {
         nsvm_printk("VMexit\n");
-        if (nested_svm_do(svm, svm->nested_vmcb, 0,
+        if (nested_svm_do(svm, svm->nested.vmcb, 0,
                           NULL, nested_svm_vmexit_real))
                 return 1;
@@ -1703,8 +1707,8 @@ static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
         int i;
         u32 *nested_msrpm = (u32*)arg1;
         for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
-                svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
-        svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
+                svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+        svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
 
         return 0;
 }
@@ -1713,11 +1717,11 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
                             void *arg2, void *opaque)
 {
         struct vmcb *nested_vmcb = (struct vmcb *)arg1;
-        struct vmcb *hsave = svm->hsave;
+        struct vmcb *hsave = svm->nested.hsave;
         struct vmcb *vmcb = svm->vmcb;
 
         /* nested_vmcb is our indicator if nested SVM is activated */
-        svm->nested_vmcb = svm->vmcb->save.rax;
+        svm->nested.vmcb = svm->vmcb->save.rax;
 
         /* Clear internal status */
         kvm_clear_exception_queue(&svm->vcpu);
@@ -1795,7 +1799,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
         svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
 
-        svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
 
         force_new_asid(&svm->vcpu);
 
         svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
@@ -1897,7 +1901,7 @@ static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
                           NULL, nested_svm_vmrun))
                 return 1;
 
-        if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
+        if (nested_svm_do(svm, svm->nested.vmcb_msrpm, 0,
                           NULL, nested_svm_vmrun_msrpm))
                 return 1;
 
@@ -2107,7 +2111,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
                 *data = svm->vmcb->save.last_excp_to;
                 break;
         case MSR_VM_HSAVE_PA:
-                *data = svm->hsave_msr;
+                *data = svm->nested.hsave_msr;
                 break;
         case MSR_VM_CR:
                 *data = 0;
@@ -2195,7 +2199,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                         svm_disable_lbrv(svm);
                 break;
         case MSR_VM_HSAVE_PA:
-                svm->hsave_msr = data;
+                svm->nested.hsave_msr = data;
                 break;
         case MSR_VM_CR:
         case MSR_VM_IGNNE:
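A note on the merge loop in nested_svm_vmrun_msrpm() above: in the SVM MSR permission map a set bit requests an intercept, so OR-ing KVM's own bitmap with the one supplied by the L1 hypervisor produces the merged map (svm->nested.msrpm) that traps an MSR access whenever either side asked for it. A minimal standalone sketch of that idea (hypothetical helper, not kernel code):

#include <stddef.h>
#include <stdint.h>

/*
 * Illustration only: merge two MSR permission maps word by word.
 * A set bit means "intercept this MSR access", so the merged map
 * intercepts whenever the host map or the guest map does.
 */
static void merge_msrpm(uint32_t *merged, const uint32_t *host,
                        const uint32_t *guest, size_t words)
{
        size_t i;

        for (i = 0; i < words; i++)
                merged[i] = host[i] | guest[i];
}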