Commit c4762fda authored by Vitaly Kuznetsov, committed by Paolo Bonzini

x86: KVM: svm: remove hardcoded instruction length from intercepts

Various intercepts hard-code the respective instruction lengths to optimize
skip_emulated_instruction(): when next_rip is pre-set, the call to
kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) is skipped entirely. The
optimization is, however, incorrect: redundant prefixes can legally enlarge
an instruction beyond the hard-coded length, so decoding cannot really be
avoided.
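
For illustration only (not part of this patch): the byte arrays below are
hypothetical examples showing how two legal encodings of the same instruction
can differ in length, which is exactly why a fixed "rip += n" is unsafe.

	/* Two legal encodings of WRMSR; the second carries a redundant
	 * CS-segment-override prefix (0x2e) that the CPU ignores. */
	static const unsigned char wrmsr_plain[]    = { 0x0f, 0x30 };
	static const unsigned char wrmsr_prefixed[] = { 0x2e, 0x0f, 0x30 };

	/* A handler that blindly advances RIP by 2 skips the first form
	 * correctly but leaves RIP pointing into the middle of the second. */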

svm->next_rip is not used when the CPU supports the 'nrips'
(X86_FEATURE_NRIPS) feature: the next RIP is provided in the VMCB. The
feature is not really new (Opteron G3s already had it), so on such hardware
this change should have zero effect.
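
For orientation, the skip path this relies on looks roughly like the sketch
below. It is a simplified outline of svm.c's skip_emulated_instruction(),
not the verbatim kernel function; return-value and error handling are
elided.

	static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* NRIPS: hardware stores the next RIP in the VMCB, so no
		 * instruction length ever needs to be guessed. */
		if (static_cpu_has(X86_FEATURE_NRIPS) && svm->vmcb->control.next_rip)
			svm->next_rip = svm->vmcb->control.next_rip;

		if (!svm->next_rip)
			/* No pre-set next_rip: decode the instruction. */
			return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP);

		kvm_rip_write(vcpu, svm->next_rip);
		svm_set_interrupt_shadow(vcpu, 0);
		return 1;
	}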

Remove manual svm->next_rip setting with hard-coded instruction lengths.
The only case where we now use svm->next_rip is EXIT_IOIO: the instruction
length is provided to us by hardware.
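
As a reference for the EXIT_IOIO case (a hedged sketch, not the full
io_interception() handler): per the AMD APM, the IOIO intercept reports the
rIP of the instruction following IN/OUT in EXITINFO2, so the handler can take
next_rip straight from the VMCB and then go through the normal skip path.

	/* Sketch: the only remaining producer of svm->next_rip. */
	svm->next_rip = svm->vmcb->control.exit_info_2;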

Hardcoded RIP advancement remains in vmrun_interception(); this will be
taken care of separately.
Reported-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 02d4160f
@@ -2900,13 +2900,11 @@ static int nop_on_interception(struct vcpu_svm *svm)
 static int halt_interception(struct vcpu_svm *svm)
 {
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm)
 {
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	return kvm_emulate_hypercall(&svm->vcpu);
 }
@@ -3694,7 +3692,6 @@ static int vmload_interception(struct vcpu_svm *svm)
 	nested_vmcb = map.hva;
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
 	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
@@ -3721,7 +3718,6 @@ static int vmsave_interception(struct vcpu_svm *svm)
 	nested_vmcb = map.hva;
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
 	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
@@ -3772,7 +3768,6 @@ static int stgi_interception(struct vcpu_svm *svm)
 	if (vgif_enabled(svm))
 		clr_intercept(svm, INTERCEPT_STGI);
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
@@ -3788,7 +3783,6 @@ static int clgi_interception(struct vcpu_svm *svm)
 	if (nested_svm_check_permissions(svm))
 		return 1;
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
 	disable_gif(svm);
@@ -3813,7 +3807,6 @@ static int invlpga_interception(struct vcpu_svm *svm)
 	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
 	kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	return kvm_skip_emulated_instruction(&svm->vcpu);
 }
@@ -3836,7 +3829,6 @@ static int xsetbv_interception(struct vcpu_svm *svm)
 	u32 index = kvm_rcx_read(&svm->vcpu);
 	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
-		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 		return kvm_skip_emulated_instruction(&svm->vcpu);
 	}
@@ -3918,7 +3910,6 @@ static int task_switch_interception(struct vcpu_svm *svm)
 static int cpuid_interception(struct vcpu_svm *svm)
 {
-	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	return kvm_emulate_cpuid(&svm->vcpu);
 }
@@ -4248,7 +4239,6 @@ static int rdmsr_interception(struct vcpu_svm *svm)
 		kvm_rax_write(&svm->vcpu, msr_info.data & 0xffffffff);
 		kvm_rdx_write(&svm->vcpu, msr_info.data >> 32);
-		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		return kvm_skip_emulated_instruction(&svm->vcpu);
 	}
 }
@@ -4454,7 +4444,6 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 		return 1;
 	} else {
 		trace_kvm_msr_write(ecx, data);
-		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		return kvm_skip_emulated_instruction(&svm->vcpu);
 	}
 }