Commit a7fc06dd authored by Michael Roth, committed by Paolo Bonzini

KVM: SVM: use .prepare_guest_switch() to handle CPU register save/setup

Currently we save host state, such as the user-visible host MSRs, and do
some initial guest register setup for MSR_TSC_AUX and MSR_AMD64_TSC_RATIO
in svm_vcpu_load(). Defer this work until just before we enter the guest by
moving the handling to kvm_x86_ops.prepare_guest_switch(), similar to how
it is done in the VMX implementation.

Additionally, since saving/restoring the host user MSRs is handled the same
way whether or not SEV-ES is enabled, move that handling into common code.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-Id: <20210202190126.2185715-4-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 553cc15f
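
The heart of the change is a simple deferral pattern: host state is saved lazily, only once per run of guest entries, and restored only if it was actually saved, with a guest_state_loaded flag guarding both sides. Below is a minimal, standalone C sketch of that pattern; the struct, the stub save/restore helpers, and the function names are simplified stand-ins for illustration, not the kernel code in the diff.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the relevant part of struct vcpu_svm. */
struct vcpu {
        bool guest_state_loaded;
};

/* Stubs standing in for the real work (MSR reads/writes, vmsave, ...). */
static void save_host_state(void)    { puts("save host state");    }
static void restore_host_state(void) { puts("restore host state"); }

/* Runs just before guest entry, analogous to .prepare_guest_switch(). */
static void prepare_guest_switch(struct vcpu *v)
{
        if (v->guest_state_loaded)
                return;                 /* already saved since the last host switch */

        save_host_state();
        v->guest_state_loaded = true;
}

/* Runs when switching back to host context, analogous to the vcpu_put path. */
static void prepare_host_switch(struct vcpu *v)
{
        if (!v->guest_state_loaded)
                return;                 /* nothing was saved, so nothing to restore */

        restore_host_state();
        v->guest_state_loaded = false;
}

int main(void)
{
        struct vcpu v = { .guest_state_loaded = false };

        prepare_guest_switch(&v);       /* saves host state once */
        prepare_guest_switch(&v);       /* no-op: flag already set */
        prepare_host_switch(&v);        /* restores host state, clears the flag */
        return 0;
}

In the patch below, the save side corresponds to the host_save_user_msrs rdmsrl() loop plus vmsave() (or sev_es_prepare_guest_switch() for SEV-ES guests), and the restore side to the wrmsrl() loop in the new svm_prepare_host_switch().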
@@ -2066,11 +2066,10 @@ void sev_es_create_vcpu(struct vcpu_svm *svm)
                                            sev_enc_bit));
 }
 
-void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
+void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
 {
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        struct vmcb_save_area *hostsa;
-       unsigned int i;
 
        /*
         * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
@@ -2079,13 +2078,6 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
         */
        vmsave(__sme_page_pa(sd->save_area));
 
-       /*
-        * Certain MSRs are restored on VMEXIT, only save ones that aren't
-        * restored.
-        */
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
        /* XCR0 is restored on VMEXIT, save the current host value */
        hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
        hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
@@ -2097,18 +2089,6 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
        hostsa->xss = host_xss;
 }
 
-void sev_es_vcpu_put(struct vcpu_svm *svm)
-{
-       unsigned int i;
-
-       /*
-        * Certain MSRs are restored on VMEXIT and were saved with vmsave in
-        * sev_es_vcpu_load() above. Only restore ones that weren't.
-        */
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-               wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-}
-
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1366,6 +1366,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
                svm->vmsa = page_address(vmsa_page);
 
        svm->asid_generation = 0;
+       svm->guest_state_loaded = false;
        init_vmcb(svm);
 
        svm_init_osvw(vcpu);
@@ -1413,23 +1414,30 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 }
 
-static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
-       int i;
+       struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+       unsigned int i;
 
-       if (unlikely(cpu != vcpu->cpu)) {
-               svm->asid_generation = 0;
-               vmcb_mark_all_dirty(svm->vmcb);
-       }
+       if (svm->guest_state_loaded)
+               return;
 
-       if (sev_es_guest(svm->vcpu.kvm)) {
-               sev_es_vcpu_load(svm, cpu);
-       } else {
-               for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-                       rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+       /*
+        * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
+        * area (non-sev-es). Save ones that aren't so we can restore them
+        * individually later.
+        */
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+               rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+
+       /*
+        * Save additional host state that will be restored on VMEXIT (sev-es)
+        * or subsequent vmload of host save area.
+        */
+       if (sev_es_guest(svm->vcpu.kvm)) {
+               sev_es_prepare_guest_switch(svm, vcpu->cpu);
+       } else {
                vmsave(__sme_page_pa(sd->save_area));
        }
@@ -1440,10 +1448,42 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                        wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
                }
        }
        /* This assumes that the kernel never uses MSR_TSC_AUX */
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
+       svm->guest_state_loaded = true;
+}
+
+static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       unsigned int i;
+
+       if (!svm->guest_state_loaded)
+               return;
+
+       /*
+        * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
+        * area (non-sev-es). Restore the ones that weren't.
+        */
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+               wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+
+       svm->guest_state_loaded = false;
+}
+
+static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
+       if (unlikely(cpu != vcpu->cpu)) {
+               svm->asid_generation = 0;
+               vmcb_mark_all_dirty(svm->vmcb);
+       }
+
        if (sd->current_vmcb != svm->vmcb) {
                sd->current_vmcb = svm->vmcb;
                indirect_branch_prediction_barrier();
@@ -1453,18 +1493,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       int i;
 
        avic_vcpu_put(vcpu);
+       svm_prepare_host_switch(vcpu);
 
        ++vcpu->stat.host_state_reload;
-       if (sev_es_guest(svm->vcpu.kvm)) {
-               sev_es_vcpu_put(svm);
-       } else {
-               for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-                       wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-       }
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -3620,10 +3652,6 @@ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
        invlpga(gva, svm->vmcb->control.asid);
 }
 
-static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -172,6 +172,8 @@ struct vcpu_svm {
        u64 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
+
+       bool guest_state_loaded;
 };
 
 struct svm_cpu_data {
@@ -567,9 +569,8 @@ int sev_handle_vmgexit(struct vcpu_svm *svm);
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
 void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
-void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
-void sev_es_vcpu_put(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
 
 /* vmenter.S */