Commit 0ce97a2b authored by Sean Christopherson, committed by Radim Krčmář

KVM: x86: Rename emulate_instruction() to kvm_emulate_instruction()

Lack of the kvm_ prefix gives the impression that it's a VMX or SVM
specific function, and there's no conflict that prevents adding the
kvm_ prefix.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 6c3dfeb6
@@ -1243,7 +1243,7 @@ enum emulation_result {
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 			    int emulation_type, void *insn, int insn_len);
-static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+static inline int kvm_emulate_instruction(struct kvm_vcpu *vcpu,
 			int emulation_type)
 {
 	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
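For readability, this is how the renamed inline wrapper reads once the hunk above is applied. It is a sketch reconstructed from the diff; the comment is added here for illustration and is not part of the patch:

/*
 * Emulate the instruction at the current RIP: forward to
 * x86_emulate_instruction() with cr2 = 0 and no pre-fetched instruction
 * bytes, so the emulator fetches and decodes the instruction itself.
 */
static inline int kvm_emulate_instruction(struct kvm_vcpu *vcpu,
					  int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}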
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	}
 	if (!svm->next_rip) {
-		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+		if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
 				EMULATE_DONE)
 			printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
@@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm)
 	WARN_ON_ONCE(!enable_vmware_backdoor);
-	er = emulate_instruction(vcpu,
+	er = kvm_emulate_instruction(vcpu,
 		EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
 	if (er == EMULATE_USER_EXIT)
 		return 0;
@@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm)
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	if (string)
-		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+		return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm)
 static int invlpg_interception(struct vcpu_svm *svm)
 {
 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
-		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+		return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
 	return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,7 +3869,7 @@ static int invlpg_interception(struct vcpu_svm *svm)
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 static int rsm_interception(struct vcpu_svm *svm)
@@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
 		ret = avic_unaccel_trap_write(svm);
 	} else {
 		/* Handling Fault */
-		ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+		ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
 	}
 	return ret;
@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
-		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+		if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
 			if (vcpu->arch.halt_request) {
 				vcpu->arch.halt_request = 0;
 				return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
 		WARN_ON_ONCE(!enable_vmware_backdoor);
-		er = emulate_instruction(vcpu,
+		er = kvm_emulate_instruction(vcpu,
 			EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
 		if (er == EMULATE_USER_EXIT)
 			return 0;
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
 	++vcpu->stat.io_exits;
 	if (string)
-		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+		return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
 	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
-	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
 static int handle_invd(struct kvm_vcpu *vcpu)
 {
-	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
 			return kvm_skip_emulated_instruction(vcpu);
 		}
 	}
-	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+	return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,7 +7704,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return kvm_skip_emulated_instruction(vcpu);
 	else
-		return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+		return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
 						EMULATE_DONE;
 }
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
 			return 1;
-		err = emulate_instruction(vcpu, 0);
+		err = kvm_emulate_instruction(vcpu, 0);
 		if (err == EMULATE_USER_EXIT) {
 			++vcpu->stat.mmio_exits;
@@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
 		emul_type = 0;
 	}
-	er = emulate_instruction(vcpu, emul_type);
+	er = kvm_emulate_instruction(vcpu, emul_type);
 	if (er == EMULATE_USER_EXIT)
 		return 0;
 	if (er != EMULATE_DONE)
@@ -7740,7 +7740,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
 	int r;
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+	r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	if (r != EMULATE_DONE)
 		return 0;