Commit e269fb21 authored by Jan Kiszka, committed by Avi Kivity

KVM: x86: Push potential exception error code on task switches

When a fault triggers a task switch, the error code, if any, has to be
pushed onto the new task's stack. Implement the missing bits.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 0760d448
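
The rule being added can be sketched in isolation: the error code's push width follows the new task's TSS type, 4 bytes for a 32-bit TSS (descriptor type bit 3 set) and 2 bytes for a 16-bit TSS, which is exactly the distinction the emulator hunk below draws with next_tss_desc.type & 8. Here is a minimal stand-alone C sketch of that rule; it is an illustration only, not the kernel code, and the flat byte-array stack, the hypothetical push_error_code() helper, and the little-endian layout are assumptions made for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stack for illustration; the real patch pushes through the
 * emulator's emulate_push() onto the guest's SS:ESP. */
static uint8_t stack[64];

/* Push a fault error code on a task switch. Bit 3 of the TSS
 * descriptor type distinguishes a 32-bit TSS (4-byte push) from a
 * 16-bit TSS (2-byte push). Little-endian host assumed. */
static unsigned push_error_code(unsigned sp, uint8_t tss_type, uint32_t ec)
{
	unsigned op_bytes = (tss_type & 8) ? 4 : 2;

	sp -= op_bytes;			/* the stack grows downward */
	memcpy(&stack[sp], &ec, op_bytes);
	return sp;
}

int main(void)
{
	unsigned sp = sizeof(stack);

	sp = push_error_code(sp, 0xb, 0x18);	/* busy 32-bit TSS: sp = 60 */
	printf("32-bit TSS: sp=%u\n", sp);
	sp = push_error_code(sp, 0x3, 0x18);	/* busy 16-bit TSS: sp = 58 */
	printf("16-bit TSS: sp=%u\n", sp);
	return 0;
}

In the patch itself the same width selection is written into c->op_bytes before calling emulate_push(), so the emulator's existing push machinery does the rest.
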
@@ -230,6 +230,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
 		     struct x86_emulate_ops *ops);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 			 struct x86_emulate_ops *ops,
-			 u16 tss_selector, int reason);
+			 u16 tss_selector, int reason,
+			 bool has_error_code, u32 error_code);
 
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */

@@ -595,7 +595,8 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+		    bool has_error_code, u32 error_code);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);

@@ -244,6 +244,7 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
 #define SVM_EXIT_READ_CR0	0x000
 #define SVM_EXIT_READ_CR3	0x003

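On the SVM side, the new define above records where the hardware reports this: for task-switch exits, bit 44 of EXITINFO2 flags a valid error code and the low 32 bits carry its value, which the task_switch_interception hunk further down reads back. A short stand-alone sketch of that decode (illustration only, not the kernel code; the ts_error_code() helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44

/* Returns true and fills *error_code when a task-switch VMEXIT
 * carried a fault error code; the code sits in EXITINFO2[31:0]. */
static bool ts_error_code(uint64_t exit_info_2, uint32_t *error_code)
{
	if (!(exit_info_2 & (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)))
		return false;
	*error_code = (uint32_t)exit_info_2;
	return true;
}
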
@@ -2344,8 +2344,9 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 }
 
 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 				   struct x86_emulate_ops *ops,
-				   u16 tss_selector, int reason)
+				   u16 tss_selector, int reason,
+				   bool has_error_code, u32 error_code)
 {
 	struct desc_struct curr_tss_desc, next_tss_desc;
 	int ret;

@@ -2418,12 +2419,22 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
 	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
 
+	if (has_error_code) {
+		struct decode_cache *c = &ctxt->decode;
+
+		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+		c->lock_prefix = 0;
+		c->src.val = (unsigned long) error_code;
+		emulate_push(ctxt);
+	}
+
 	return ret;
 }
 
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 			 struct x86_emulate_ops *ops,
-			 u16 tss_selector, int reason)
+			 u16 tss_selector, int reason,
+			 bool has_error_code, u32 error_code)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;

@@ -2431,12 +2442,15 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 	memset(c, 0, sizeof(struct decode_cache));
 	c->eip = ctxt->eip;
 	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+	c->dst.type = OP_NONE;
 
-	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason);
+	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+				     has_error_code, error_code);
 
 	if (rc == X86EMUL_CONTINUE) {
 		memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
 		kvm_rip_write(ctxt->vcpu, c->eip);
+		rc = writeback(ctxt, ops);
 	}
 
 	return rc;

@@ -2222,6 +2222,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
 		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
 	uint32_t idt_v =
 		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
+	bool has_error_code = false;
+	u32 error_code = 0;
 
 	tss_selector = (u16)svm->vmcb->control.exit_info_1;
 
@@ -2242,6 +2244,12 @@ static int task_switch_interception(struct vcpu_svm *svm)
 			svm->vcpu.arch.nmi_injected = false;
 			break;
 		case SVM_EXITINTINFO_TYPE_EXEPT:
+			if (svm->vmcb->control.exit_info_2 &
+			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
+				has_error_code = true;
+				error_code =
+					(u32)svm->vmcb->control.exit_info_2;
+			}
 			kvm_clear_exception_queue(&svm->vcpu);
 			break;
 		case SVM_EXITINTINFO_TYPE_INTR:

@@ -2258,7 +2266,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
 	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
 		skip_emulated_instruction(&svm->vcpu);
 
-	return kvm_task_switch(&svm->vcpu, tss_selector, reason);
+	return kvm_task_switch(&svm->vcpu, tss_selector, reason,
+			       has_error_code, error_code);
 }
 
 static int cpuid_interception(struct vcpu_svm *svm)

@@ -3271,6 +3271,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long exit_qualification;
+	bool has_error_code = false;
+	u32 error_code = 0;
 	u16 tss_selector;
 	int reason, type, idt_v;
 
@@ -3293,6 +3295,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 			kvm_clear_interrupt_queue(vcpu);
 			break;
 		case INTR_TYPE_HARD_EXCEPTION:
+			if (vmx->idt_vectoring_info &
+			    VECTORING_INFO_DELIVER_CODE_MASK) {
+				has_error_code = true;
+				error_code =
+					vmcs_read32(IDT_VECTORING_ERROR_CODE);
+			}
+			/* fall through */
 		case INTR_TYPE_SOFT_EXCEPTION:
 			kvm_clear_exception_queue(vcpu);
 			break;

@@ -3307,7 +3316,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 		       type != INTR_TYPE_NMI_INTR))
 		skip_emulated_instruction(vcpu);
 
-	if (!kvm_task_switch(vcpu, tss_selector, reason))
+	if (!kvm_task_switch(vcpu, tss_selector, reason, has_error_code,
+			     error_code))
 		return 0;
 
 	/* clear all local breakpoint enable flags */

@@ -4778,7 +4778,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+		    bool has_error_code, u32 error_code)
 {
 	int cs_db, cs_l, ret;
 	cache_all_regs(vcpu);

@@ -4796,7 +4797,8 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 
 	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
-				   tss_selector, reason);
+				   tss_selector, reason, has_error_code,
+				   error_code);
 
 	if (ret == X86EMUL_CONTINUE)
 		kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);