Commit 2bb16bea authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: nSVM: Swap the parameter order for svm_copy_vmrun_state()/svm_copy_vmloadsave_state()

Make svm_copy_vmrun_state()/svm_copy_vmloadsave_state() interface match
'memcpy(dest, src)' to avoid any confusion.

No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210719090322.625277-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9a9e7481
...@@ -702,8 +702,8 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) ...@@ -702,8 +702,8 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
} }
/* Copy state save area fields which are handled by VMRUN */ /* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *from_save, void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
struct vmcb_save_area *to_save) struct vmcb_save_area *from_save)
{ {
to_save->es = from_save->es; to_save->es = from_save->es;
to_save->cs = from_save->cs; to_save->cs = from_save->cs;
...@@ -722,7 +722,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save, ...@@ -722,7 +722,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
to_save->cpl = 0; to_save->cpl = 0;
} }
void svm_copy_vmloadsave_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb) void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{ {
to_vmcb->save.fs = from_vmcb->save.fs; to_vmcb->save.fs = from_vmcb->save.fs;
to_vmcb->save.gs = from_vmcb->save.gs; to_vmcb->save.gs = from_vmcb->save.gs;
...@@ -1385,7 +1385,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, ...@@ -1385,7 +1385,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save); svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
nested_load_control_from_vmcb12(svm, ctl); nested_load_control_from_vmcb12(svm, ctl);
svm_switch_vmcb(svm, &svm->nested.vmcb02); svm_switch_vmcb(svm, &svm->nested.vmcb02);
......
...@@ -2147,11 +2147,11 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) ...@@ -2147,11 +2147,11 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
ret = kvm_skip_emulated_instruction(vcpu); ret = kvm_skip_emulated_instruction(vcpu);
if (vmload) { if (vmload) {
svm_copy_vmloadsave_state(vmcb12, svm->vmcb); svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
svm->sysenter_eip_hi = 0; svm->sysenter_eip_hi = 0;
svm->sysenter_esp_hi = 0; svm->sysenter_esp_hi = 0;
} else { } else {
svm_copy_vmloadsave_state(svm->vmcb, vmcb12); svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
} }
kvm_vcpu_unmap(vcpu, &map, true); kvm_vcpu_unmap(vcpu, &map, true);
...@@ -4345,8 +4345,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) ...@@ -4345,8 +4345,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, svm_copy_vmrun_state(map_save.hva + 0x400,
map_save.hva + 0x400); &svm->vmcb01.ptr->save);
kvm_vcpu_unmap(vcpu, &map_save, true); kvm_vcpu_unmap(vcpu, &map_save, true);
} }
...@@ -4394,8 +4394,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) ...@@ -4394,8 +4394,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
&map_save) == -EINVAL) &map_save) == -EINVAL)
return 1; return 1;
svm_copy_vmrun_state(map_save.hva + 0x400, svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
&svm->vmcb01.ptr->save); map_save.hva + 0x400);
kvm_vcpu_unmap(vcpu, &map_save, true); kvm_vcpu_unmap(vcpu, &map_save, true);
} }
......
...@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm); ...@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm); void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu); int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *from_save, void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
struct vmcb_save_area *to_save); struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb); void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm); int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code) static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment