Commit 9dac77fa authored by Avi Kivity

KVM: x86 emulator: fold decode_cache into x86_emulate_ctxt

This saves a lot of pointless casts between x86_emulate_ctxt and decode_cache.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 36dd9bb5
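Before the diff, a minimal sketch of what the fold buys (invented field names, not the kernel's): with the decode cache as a separate struct embedded in the context, every helper needs a second pointer or a cast to reach decode state; folding the fields into the container leaves one pointer that reaches everything.

	/* Before the fold: two types; helpers keep a second local to reach
	 * decode state through the container. */
	struct decode_cache_before {
		unsigned char b;        /* opcode byte */
		unsigned long regs[16];
	};

	struct emulate_ctxt_before {
		unsigned long eflags;
		struct decode_cache_before decode;
	};

	void helper_before(struct emulate_ctxt_before *ctxt)
	{
		/* the boilerplate local this patch removes everywhere */
		struct decode_cache_before *c = &ctxt->decode;
		c->b = 0x0f;
	}

	/* After the fold: one type; decode fields sit directly in the context. */
	struct emulate_ctxt_after {
		unsigned long eflags;
		unsigned char b;
		unsigned long regs[16];
	};

	void helper_after(struct emulate_ctxt_after *ctxt)
	{
		ctxt->b = 0x0f; /* direct access, no second pointer or cast */
	}

The diff below does exactly this across the emulator: the decode_cache members move into x86_emulate_ctxt, and every "struct decode_cache *c = &ctxt->decode" local disappears.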
arch/x86/include/asm/kvm_emulate.h
@@ -229,7 +229,26 @@ struct read_cache {
 	unsigned long end;
 };
 
-struct decode_cache {
+struct x86_emulate_ctxt {
+	struct x86_emulate_ops *ops;
+
+	/* Register state before/after emulation. */
+	unsigned long eflags;
+	unsigned long eip; /* eip before instruction emulation */
+	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
+	int mode;
+
+	/* interruptibility state, as a result of execution of STI or MOV SS */
+	int interruptibility;
+
+	bool guest_mode; /* guest running a nested guest */
+	bool perm_ok; /* do not check permissions if true */
+	bool only_vendor_specific_insn;
+	bool have_exception;
+	struct x86_exception exception;
+
+	/* decode cache */
 	u8 twobyte;
 	u8 b;
 	u8 intercept;
@@ -261,29 +280,6 @@ struct decode_cache {
 	struct read_cache mem_read;
 };
 
-struct x86_emulate_ctxt {
-	struct x86_emulate_ops *ops;
-
-	/* Register state before/after emulation. */
-	unsigned long eflags;
-	unsigned long eip; /* eip before instruction emulation */
-	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
-	int mode;
-
-	/* interruptibility state, as a result of execution of STI or MOV SS */
-	int interruptibility;
-
-	bool guest_mode; /* guest running a nested guest */
-	bool perm_ok; /* do not check permissions if true */
-	bool only_vendor_specific_insn;
-	bool have_exception;
-	struct x86_exception exception;
-
-	/* decode cache */
-	struct decode_cache decode;
-};
-
 /* Repeat String Operation Prefix */
 #define REPE_PREFIX	0xf3
 #define REPNE_PREFIX	0xf2
...
This diff is collapsed.
arch/x86/kvm/trace.h
@@ -675,12 +675,12 @@ TRACE_EVENT(kvm_emulate_insn,
 		),
 
 	TP_fast_assign(
-		__entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start;
+		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
 		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
-		__entry->len = vcpu->arch.emulate_ctxt.decode._eip
-			       - vcpu->arch.emulate_ctxt.decode.fetch.start;
+		__entry->len = vcpu->arch.emulate_ctxt._eip
+			       - vcpu->arch.emulate_ctxt.fetch.start;
 		memcpy(__entry->insn,
-		       vcpu->arch.emulate_ctxt.decode.fetch.data,
+		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
 		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
 		__entry->failed = failed;
...
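A note on the tracepoint arithmetic above: fetch.start records the guest address where instruction fetching began and fetch.data buffers the raw bytes (the memcpy of 15 matches the x86 maximum instruction length), while _eip has been advanced past the decoded instruction, so their difference is the instruction length. A minimal sketch of that calculation (invented types, not the kernel's):

	/* Sketch: the fetch cache remembers where decoding started; _eip ends
	 * up past the instruction, so subtracting gives the length in bytes. */
	struct fetch_cache {
		unsigned long start;     /* guest address fetching began at */
		unsigned char data[15];  /* raw bytes; 15 is the x86 maximum */
	};

	unsigned long insn_len(unsigned long _eip, const struct fetch_cache *f)
	{
		return _eip - f->start;
	}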
arch/x86/kvm/x86.c
@@ -4507,24 +4507,24 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 		kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
-static void init_decode_cache(struct decode_cache *c,
+static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
 			      const unsigned long *regs)
 {
-	memset(c, 0, offsetof(struct decode_cache, regs));
-	memcpy(c->regs, regs, sizeof(c->regs));
+	memset(&ctxt->twobyte, 0,
+	       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
+	memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
 
-	c->fetch.start = 0;
-	c->fetch.end = 0;
-	c->io_read.pos = 0;
-	c->io_read.end = 0;
-	c->mem_read.pos = 0;
-	c->mem_read.end = 0;
+	ctxt->fetch.start = 0;
+	ctxt->fetch.end = 0;
+	ctxt->io_read.pos = 0;
+	ctxt->io_read.end = 0;
+	ctxt->mem_read.pos = 0;
+	ctxt->mem_read.end = 0;
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	struct decode_cache *c = &ctxt->decode;
 	int cs_db, cs_l;
 
 	/*
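One subtlety in init_decode_cache above: the old code could memset() the standalone struct from its start up to the regs member via offsetof(); with the decode fields now embedded mid-struct, there is no standalone type to take an offset into, so the patch zeroes the byte range from the first decode field (twobyte) up to regs by subtracting member addresses. A self-contained sketch of the same idiom, with simplified invented fields (standard C char * arithmetic rather than the kernel's void * GCC extension):

	#include <stdio.h>
	#include <string.h>

	struct ctxt {
		unsigned long eflags;    /* before the region: preserved */
		unsigned char twobyte;   /* first field of the decode region */
		int op_bytes;
		unsigned long regs[4];   /* from here on: preserved, reloaded separately */
	};

	/* Zero only [twobyte, regs); relies on member declaration order. */
	void reset_decode(struct ctxt *c)
	{
		memset(&c->twobyte, 0, (char *)&c->regs - (char *)&c->twobyte);
	}

	int main(void)
	{
		struct ctxt c = { .eflags = 0x202, .twobyte = 1, .op_bytes = 4,
				  .regs = { 1, 2, 3, 4 } };
		reset_decode(&c);
		printf("%lx %d %d %lu\n", c.eflags, c.twobyte, c.op_bytes, c.regs[0]);
		/* prints: 202 0 0 1 -- eflags and regs survive the reset */
		return 0;
	}

This also explains why regs stays the last member of the decode region: everything before it is scratch state that can be bulk-zeroed, while regs is refilled from vcpu->arch.regs by the memcpy that follows.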
@@ -4546,28 +4546,27 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 							X86EMUL_MODE_PROT16;
 	ctxt->guest_mode = is_guest_mode(vcpu);
 
-	init_decode_cache(c, vcpu->arch.regs);
+	init_decode_cache(ctxt, vcpu->arch.regs);
 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
 }
 
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
 	init_emulate_ctxt(vcpu);
 
-	c->op_bytes = 2;
-	c->ad_bytes = 2;
-	c->_eip = ctxt->eip + inc_eip;
+	ctxt->op_bytes = 2;
+	ctxt->ad_bytes = 2;
+	ctxt->_eip = ctxt->eip + inc_eip;
 	ret = emulate_int_real(ctxt, irq);
 
 	if (ret != X86EMUL_CONTINUE)
 		return EMULATE_FAIL;
 
-	ctxt->eip = c->_eip;
-	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+	ctxt->eip = ctxt->_eip;
+	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
 	kvm_rip_write(vcpu, ctxt->eip);
 	kvm_set_rflags(vcpu, ctxt->eflags);
@@ -4631,7 +4630,6 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 {
 	int r;
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	struct decode_cache *c = &ctxt->decode;
 	bool writeback = true;
 
 	kvm_clear_exception_queue(vcpu);
@@ -4661,7 +4659,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 	}
 
 	if (emulation_type & EMULTYPE_SKIP) {
-		kvm_rip_write(vcpu, c->_eip);
+		kvm_rip_write(vcpu, ctxt->_eip);
 		return EMULATE_DONE;
 	}
@@ -4669,7 +4667,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 	   changes registers values during IO operation */
 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
-		memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+		memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
 	}
 
 restart:
@@ -4707,7 +4705,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		kvm_set_rflags(vcpu, ctxt->eflags);
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-		memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+		memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
 	} else
@@ -5718,8 +5716,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 		 * that usually, but some bad designed PV devices (vmware
 		 * backdoor interface) need this to work
 		 */
-		struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
-		memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+		struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+		memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 	}
 
 	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
@@ -5849,7 +5847,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 		    bool has_error_code, u32 error_code)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
 	init_emulate_ctxt(vcpu);
@@ -5860,7 +5857,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 	if (ret)
 		return EMULATE_FAIL;
 
-	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
 	kvm_rip_write(vcpu, ctxt->eip);
 	kvm_set_rflags(vcpu, ctxt->eflags);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
...