Commit 2390218b authored by Avi Kivity

KVM: Fix mov cr3 #GP at wrong instruction

On Intel, we call skip_emulated_instruction() even if we injected a #GP,
resulting in the #GP pointing at the wrong address.

Fix by handling the exception injection and the instruction skip in the same
place, so that exactly one of the two is done.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent a83b29c6
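
For reference, complete_insn_gp() is the vmx.c helper relied on by the
handle_cr() hunk below; its body is not part of this diff. A minimal sketch
of its assumed shape, injecting the #GP or skipping the instruction but
never both:

/* Sketch only (assumed helper shape, not part of this commit). */
static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);			/* #GP points at the faulting mov */
	else
		skip_emulated_instruction(vcpu);	/* advance RIP only on success */
}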
@@ -598,7 +598,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 		    bool has_error_code, u32 error_code);
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
...
@@ -3203,7 +3203,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
 	return 1;
 }
...
@@ -1963,7 +1963,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 		svm->vmcb->save.cr3 = hsave->save.cr3;
 		svm->vcpu.arch.cr3 = hsave->save.cr3;
 	} else {
-		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
+		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
 	}
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
@@ -2086,7 +2086,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
 		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
 	} else
-		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
 	/* Guest paging mode is active - reset mmu */
 	kvm_mmu_reset_context(&svm->vcpu);
...
@@ -3185,8 +3185,8 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
-			kvm_set_cr3(vcpu, val);
-			skip_emulated_instruction(vcpu);
+			err = kvm_set_cr3(vcpu, val);
+			complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
 			err = kvm_set_cr4(vcpu, val);
...
@@ -565,7 +565,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
-static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
@@ -604,12 +604,6 @@ static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	vcpu->arch.mmu.new_cr3(vcpu);
 	return 0;
 }
-
-void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
-	if (__kvm_set_cr3(vcpu, cr3))
-		kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
@@ -3726,7 +3720,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
 		vcpu->arch.cr2 = val;
 		break;
 	case 3:
-		res = __kvm_set_cr3(vcpu, val);
+		res = kvm_set_cr3(vcpu, val);
 		break;
 	case 4:
 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
...