Commit f9eb4af6 authored by Eugene Korenevsky, committed by Paolo Bonzini

KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions

According to the Intel SDM, several checks must be applied to the memory
operands of VMX instructions.

Long mode: #GP(0) or #SS(0) depending on the segment must be thrown
if the memory address is in a non-canonical form.
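
For reference, an address is in canonical form when bits 63:47 are all zeros or all ones, i.e. the upper bits are a sign extension of bit 47 (assuming 48-bit linear addresses). The patch relies on the kernel's existing is_noncanonical_address() helper for this; the standalone C sketch below only illustrates the test and is not part of the patch:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustration only: with 48-bit linear addresses, an address is
	 * canonical iff bits 63:47 are all zeros or all ones. */
	static bool example_is_noncanonical(uint64_t va)
	{
		uint64_t upper = va >> 47;	/* bits 63:47, 17 bits */

		return upper != 0 && upper != 0x1ffff;
	}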

Protected mode, checks applied in the following order:
- The segment type must be checked, taking the access type (read or write)
	into account.
	For write access: #GP(0) must be generated if the destination operand
		is located in a read-only data segment or any code segment.
	For read access: #GP(0) must be generated if the source operand is
		located in an execute-only code segment.
- The usability of the segment must be checked. #GP(0) or #SS(0), depending
	on the segment, must be thrown if the segment is unusable.
- Limit check. #GP(0) or #SS(0), depending on the segment, must be thrown
	if the memory operand's effective address is outside the segment
	limit.
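
The segment-type checks above correspond to the descriptor type field carried in kvm_segment.type: bit 3 (0x8) distinguishes code from data segments, and bit 1 (0x2) is the writable bit for data segments and the readable bit for code segments (Intel SDM, Vol. 3). A minimal sketch of the two predicates the patch implements, with helper names invented purely for illustration:

	#include <stdbool.h>

	/* Write access faults if the destination is a read-only data segment
	 * ((type & 0xa) == 0) or any code segment (type & 0x8). */
	static bool vmx_write_operand_faults(unsigned int type)
	{
		return (type & 0xa) == 0 || (type & 0x8);
	}

	/* Read access faults if the source is an execute-only code segment:
	 * code segment (bit 3 set) that is not readable (bit 1 clear). */
	static bool vmx_read_operand_faults(unsigned int type)
	{
		return (type & 0xa) == 0x8;
	}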
Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0da029ed
@@ -6408,8 +6408,12 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
  */
 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 			       unsigned long exit_qualification,
-			       u32 vmx_instruction_info, gva_t *ret)
+			       u32 vmx_instruction_info, bool wr, gva_t *ret)
 {
+	gva_t off;
+	bool exn;
+	struct kvm_segment s;
+
 	/*
 	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
 	 * Execution", on an exit, vmx_instruction_info holds most of the
@@ -6434,22 +6438,63 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Addr = segment_base + offset */
 	/* offset = base + [index * scale] + displacement */
-	*ret = vmx_get_segment_base(vcpu, seg_reg);
+	off = exit_qualification; /* holds the displacement */
 	if (base_is_valid)
-		*ret += kvm_register_read(vcpu, base_reg);
+		off += kvm_register_read(vcpu, base_reg);
 	if (index_is_valid)
-		*ret += kvm_register_read(vcpu, index_reg)<<scaling;
-	*ret += exit_qualification; /* holds the displacement */
+		off += kvm_register_read(vcpu, index_reg)<<scaling;
+	vmx_get_segment(vcpu, &s, seg_reg);
+	*ret = s.base + off;
 
 	if (addr_size == 1) /* 32 bit */
 		*ret &= 0xffffffff;
 
-	/*
-	 * TODO: throw #GP (and return 1) in various cases that the VM*
-	 * instructions require it - e.g., offset beyond segment limit,
-	 * unusable or unreadable/unwritable segment, non-canonical 64-bit
-	 * address, and so on. Currently these are not checked.
-	 */
+	/* Checks for #GP/#SS exceptions. */
+	exn = false;
+	if (is_protmode(vcpu)) {
+		/* Protected mode: apply checks for segment validity in the
+		 * following order:
+		 * - segment type check (#GP(0) may be thrown)
+		 * - usability check (#GP(0)/#SS(0))
+		 * - limit check (#GP(0)/#SS(0))
+		 */
+		if (wr)
+			/* #GP(0) if the destination operand is located in a
+			 * read-only data segment or any code segment.
+			 */
+			exn = ((s.type & 0xa) == 0 || (s.type & 8));
+		else
+			/* #GP(0) if the source operand is located in an
+			 * execute-only code segment
+			 */
+			exn = ((s.type & 0xa) == 8);
+	}
+	if (exn) {
+		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+		return 1;
+	}
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check for long mode.
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
+		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+		 */
+		exn = (s.unusable != 0);
+		/* Protected mode: #GP(0)/#SS(0) if the memory
+		 * operand is outside the segment limit.
+		 */
+		exn = exn || (off + sizeof(u64) > s.limit);
+	}
+	if (exn) {
+		kvm_queue_exception_e(vcpu,
+				      seg_reg == VCPU_SREG_SS ?
+						SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+
 	return 0;
 }
@@ -6471,7 +6516,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
@@ -6999,7 +7044,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 			field_value);
 	} else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, &gva))
+				vmx_instruction_info, true, &gva))
 			return 1;
 		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
...@@ -7036,7 +7081,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) ...@@ -7036,7 +7081,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
(((vmx_instruction_info) >> 3) & 0xf)); (((vmx_instruction_info) >> 3) & 0xf));
else { else {
if (get_vmx_mem_address(vcpu, exit_qualification, if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, &gva)) vmx_instruction_info, false, &gva))
return 1; return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
@@ -7128,7 +7173,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 		return 1;
 	if (get_vmx_mem_address(vcpu, exit_qualification,
-			vmx_instruction_info, &vmcs_gva))
+			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
 	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
@@ -7184,7 +7229,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmx_instruction_info, &gva))
+			vmx_instruction_info, false, &gva))
 		return 1;
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
 			sizeof(operand), &e)) {
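
With the new bool wr argument, each call site states whether the emulated instruction writes to its memory operand (VMREAD to memory, VMPTRST) or only reads it (VMPTRLD/VMCLEAR via nested_vmx_check_vmptr, VMWRITE, INVEPT), so that the matching segment-type check is applied. A hypothetical handler following the same pattern would call it as in the sketch below (illustration only, not part of the patch):

	gva_t gva;

	/* Request the write-side checks because this (hypothetical)
	 * instruction stores a result to its memory operand. */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
				vmcs_read32(VMX_INSTRUCTION_INFO),
				true, &gva))
		return 1;	/* #GP or #SS has already been queued */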