Commit 4aa2691d authored by Wei Huang, committed by Paolo Bonzini

KVM: x86: Factor out x86 instruction emulation with decoding

Move the instruction decode part out of x86_emulate_instruction() so that it
can be used in other places. Also move kvm_clear_exception_queue() inside the
if statement, as it does not apply when KVM is coming back from userspace.
Co-developed-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
Message-Id: <20210126081831.570253-2-wei.huang2@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9a3ecd5e
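
The split lets a caller decode an instruction first, inspect the decoded state, and only then decide whether to run the emulator. Below is a minimal, hypothetical sketch of such a caller (not part of this patch), assuming KVM kernel context; the handler name my_handle_gp_intercept and the opcode check are purely illustrative.

/*
 * Hypothetical sketch: a vendor intercept handler that decodes the faulting
 * instruction first, inspects the decoded context, and then runs the
 * emulator without decoding again.
 */
#include <linux/kvm_host.h>
#include "kvm_emulate.h"
#include "x86.h"

static int my_handle_gp_intercept(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	/* Decode only; insn == NULL and insn_len == 0 fetch from the guest. */
	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
		return 1;	/* decode failed; bail out and let the caller decide */

	/* Inspect the decoded state (illustrative check of the opcode byte). */
	if (ctxt->b != 0x0f)
		return 1;

	/* Emulate without re-decoding; the context prepared above is reused. */
	return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_DECODE, NULL, 0);
}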
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7317,35 +7317,21 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
 	return false;
 }
 
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-			    int emulation_type, void *insn, int insn_len)
+/*
+ * Decode to be emulated instruction. Return EMULATION_OK if success.
+ */
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+				    void *insn, int insn_len)
 {
-	int r;
+	int r = EMULATION_OK;
 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
-	bool writeback = true;
-	bool write_fault_to_spt;
-
-	if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len)))
-		return 1;
-
-	vcpu->arch.l1tf_flush_l1d = true;
 
-	/*
-	 * Clear write_fault_to_shadow_pgtable here to ensure it is
-	 * never reused.
-	 */
-	write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
-	vcpu->arch.write_fault_to_shadow_pgtable = false;
-	kvm_clear_exception_queue(vcpu);
+	init_emulate_ctxt(vcpu);
 
-	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
-		init_emulate_ctxt(vcpu);
-
-		/*
-		 * We will reenter on the same instruction since
-		 * we do not set complete_userspace_io.  This does not
-		 * handle watchpoints yet, those would be handled in
-		 * the emulate_ops.
-		 */
-		if (!(emulation_type & EMULTYPE_SKIP) &&
-		    kvm_vcpu_check_breakpoint(vcpu, &r))
+	/*
+	 * We will reenter on the same instruction since we do not set
+	 * complete_userspace_io. This does not handle watchpoints yet,
+	 * those would be handled in the emulate_ops.
+	 */
+	if (!(emulation_type & EMULTYPE_SKIP) &&
+	    kvm_vcpu_check_breakpoint(vcpu, &r))
@@ -7362,6 +7348,36 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	trace_kvm_emulate_insn_start(vcpu);
 	++vcpu->stat.insn_emulation;
 
+	return r;
+}
+EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
+
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			    int emulation_type, void *insn, int insn_len)
+{
+	int r;
+	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+	bool writeback = true;
+	bool write_fault_to_spt;
+
+	if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len)))
+		return 1;
+
+	vcpu->arch.l1tf_flush_l1d = true;
+
+	/*
+	 * Clear write_fault_to_shadow_pgtable here to ensure it is
+	 * never reused.
+	 */
+	write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+	vcpu->arch.write_fault_to_shadow_pgtable = false;
+
+	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
+		kvm_clear_exception_queue(vcpu);
+
+		r = x86_decode_emulated_instruction(vcpu, emulation_type,
+						    insn, insn_len);
 		if (r != EMULATION_OK)  {
 			if ((emulation_type & EMULTYPE_TRAP_UD) ||
 			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
......
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -273,6 +273,8 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
 bool kvm_vector_hashing_enabled(void);
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
+int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+				    void *insn, int insn_len);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 			    int emulation_type, void *insn, int insn_len);
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
......