Commit 6e202097 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Add error handling to VMREAD helper

Now that VMREAD flows require a taken branch, courtesy of commit

  3901336e ("x86/kvm: Don't call kvm_spurious_fault() from .fixup")

bite the bullet and add full error handling to VMREAD, i.e. replace the
JMP added by __ex()/____kvm_handle_fault_on_reboot() with a hinted Jcc.

To minimize the code footprint, add a helper function, vmread_error(),
to handle both faults and failures so that the inline flow has a single
CALL.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52a9fcbc
@@ -11,9 +11,8 @@
 #include "vmcs.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
-#define __ex_clear(x, reg) \
-	____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
 
+asmlinkage void vmread_error(unsigned long field, bool fault);
 void vmwrite_error(unsigned long field, unsigned long value);
 void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
 void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
@@ -68,8 +67,22 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 {
 	unsigned long value;
 
-	asm volatile (__ex_clear("vmread %1, %0", "%k0")
-		      : "=r"(value) : "r"(field));
+	asm volatile("1: vmread %2, %1\n\t"
+		     ".byte 0x3e\n\t" /* branch taken hint */
+		     "ja 3f\n\t"
+		     "mov %2, %%" _ASM_ARG1 "\n\t"
+		     "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t"
+		     "2: call vmread_error\n\t"
+		     "xor %k1, %k1\n\t"
+		     "3:\n\t"
+
+		     ".pushsection .fixup, \"ax\"\n\t"
+		     "4: mov %2, %%" _ASM_ARG1 "\n\t"
+		     "mov $1, %%" _ASM_ARG2 "\n\t"
+		     "jmp 2b\n\t"
+		     ".popsection\n\t"
+		     _ASM_EXTABLE(1b, 4b)
+		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
 	return value;
 }
...
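For reference, the JA above is taken only when CF=0 and ZF=0, i.e. on VMsucceed (VMfailInvalid sets CF, VMfailValid sets ZF), and the 0x3e prefix statically hints the branch as taken; a fault instead reaches the .fixup stub via the exception table and re-enters the shared CALL with fault=true. A rough userspace sketch of that control flow, using hypothetical demo_* names rather than kernel code:

/*
 * Hypothetical userspace model of the asm above.  VMREAD has three
 * outcomes: VMsucceed (CF=ZF=0, the hinted JA is taken), VMfail
 * (fall through, report fault=false), or a hardware fault (the
 * extable fixup jumps back in with fault=true).  Both error paths
 * funnel into a single helper call and yield a zeroed result.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_outcome { DEMO_SUCCEED, DEMO_VMFAIL, DEMO_FAULT };

static void demo_vmread_error(unsigned long field, bool fault)
{
	if (fault)
		printf("spurious fault reading field=%lx\n", field);
	else
		printf("vmread failed: field=%lx\n", field);
}

static unsigned long demo_vmcs_readl(unsigned long field, enum demo_outcome out)
{
	unsigned long value = 0x1234;		/* pretend VMREAD result */

	if (out == DEMO_SUCCEED)
		return value;			/* "ja 3f": branch taken */

	/* single shared CALL site; the bool argument picks the message */
	demo_vmread_error(field, out == DEMO_FAULT);
	return 0;				/* "xor %k1, %k1" */
}

int main(void)
{
	printf("ok:     %lx\n", demo_vmcs_readl(0x6c00, DEMO_SUCCEED));
	printf("vmfail: %lx\n", demo_vmcs_readl(0x6c00, DEMO_VMFAIL));
	printf("fault:  %lx\n", demo_vmcs_readl(0x6c00, DEMO_FAULT));
	return 0;
}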
@@ -349,6 +349,14 @@ do { \
 	pr_warn_ratelimited(fmt);	\
 } while (0)
 
+asmlinkage void vmread_error(unsigned long field, bool fault)
+{
+	if (fault)
+		kvm_spurious_fault();
+	else
+		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
+}
+
 noinline void vmwrite_error(unsigned long field, unsigned long value)
 {
 	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
...