Commit 3eeb3288 authored by Avi Kivity, committed by Marcelo Tosatti

KVM: Add a helper for checking if the guest is in protected mode

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 6b52d186
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <asm/kvm_emulate.h> #include <asm/kvm_emulate.h>
#include "x86.h"
#include "mmu.h" /* for is_long_mode() */ #include "mmu.h" /* for is_long_mode() */
/* /*
...@@ -1515,7 +1516,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt) ...@@ -1515,7 +1516,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
/* syscall is not available in real mode */ /* syscall is not available in real mode */
if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
|| !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) || !is_protmode(ctxt->vcpu))
return -1; return -1;
setup_syscalls_segments(ctxt, &cs, &ss); setup_syscalls_segments(ctxt, &cs, &ss);
...@@ -1568,8 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt) ...@@ -1568,8 +1569,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
return -1; return -1;
/* inject #GP if in real mode or paging is disabled */ /* inject #GP if in real mode or paging is disabled */
if (ctxt->mode == X86EMUL_MODE_REAL || if (ctxt->mode == X86EMUL_MODE_REAL || !is_protmode(ctxt->vcpu)) {
!kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
kvm_inject_gp(ctxt->vcpu, 0); kvm_inject_gp(ctxt->vcpu, 0);
return -1; return -1;
} }
...@@ -1634,8 +1634,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt) ...@@ -1634,8 +1634,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
return -1; return -1;
/* inject #GP if in real mode or paging is disabled */ /* inject #GP if in real mode or paging is disabled */
if (ctxt->mode == X86EMUL_MODE_REAL if (ctxt->mode == X86EMUL_MODE_REAL || !is_protmode(ctxt->vcpu)) {
|| !kvm_read_cr0_bits(ctxt->vcpu, X86_CR0_PE)) {
kvm_inject_gp(ctxt->vcpu, 0); kvm_inject_gp(ctxt->vcpu, 0);
return -1; return -1;
} }
......
...@@ -1845,7 +1845,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, ...@@ -1845,7 +1845,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
static int vmx_get_cpl(struct kvm_vcpu *vcpu) static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{ {
if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) /* if real mode */ if (!is_protmode(vcpu))
return 0; return 0;
if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
...@@ -2100,7 +2100,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) ...@@ -2100,7 +2100,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
static bool guest_state_valid(struct kvm_vcpu *vcpu) static bool guest_state_valid(struct kvm_vcpu *vcpu)
{ {
/* real mode guest state checks */ /* real mode guest state checks */
if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) { if (!is_protmode(vcpu)) {
if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
return false; return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
......
...@@ -3786,8 +3786,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) ...@@ -3786,8 +3786,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
* hypercall generates UD from non zero cpl and real mode * hypercall generates UD from non zero cpl and real mode
* per HYPER-V spec * per HYPER-V spec
*/ */
if (kvm_x86_ops->get_cpl(vcpu) != 0 || if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
kvm_queue_exception(vcpu, UD_VECTOR); kvm_queue_exception(vcpu, UD_VECTOR);
return 0; return 0;
} }
...@@ -4751,7 +4750,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, ...@@ -4751,7 +4750,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
{ {
struct kvm_segment kvm_seg; struct kvm_segment kvm_seg;
if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE))) if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
return kvm_load_realmode_segment(vcpu, selector, seg); return kvm_load_realmode_segment(vcpu, selector, seg);
if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
return 1; return 1;
...@@ -5103,7 +5102,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, ...@@ -5103,7 +5102,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
/* Older userspace won't unhalt the vcpu on reset. */ /* Older userspace won't unhalt the vcpu on reset. */
if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!(kvm_read_cr0_bits(vcpu, X86_CR0_PE))) !is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu_put(vcpu); vcpu_put(vcpu);
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#define ARCH_X86_KVM_X86_H #define ARCH_X86_KVM_X86_H
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{ {
...@@ -35,4 +36,9 @@ static inline bool kvm_exception_is_soft(unsigned int nr) ...@@ -35,4 +36,9 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index); u32 function, u32 index);
/*
 * True if the guest has CR0.PE set, i.e. is running in protected mode
 * rather than real mode.  Centralizes the kvm_read_cr0_bits(vcpu,
 * X86_CR0_PE) check used by the emulator, VMX, and x86 common code.
 */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment