Commit 1c3c87d7 authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-x86-selftests-6.7-rcN' of https://github.com/kvm-x86/linux into HEAD

KVM selftests fixes for 6.8 merge window:

 - Fix an annoying goof where the NX hugepage test prints out garbage
   instead of the magic token needed to run the test.

 - Fix build errors when a header is deleted/moved due to a missing flag
   in the Makefile.

 - Detect if KVM bugged/killed a selftest's VM and print out a helpful
   message instead of complaining that a random ioctl() failed.

 - Annotate the guest printf/assert helpers with __printf(), and fix the
   various bugs that were lurking due to lack of said annotation.

A small subset of these was included in 6.7-rc as well.
parents 80583d0c 1b2658e4
...@@ -86,7 +86,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test ...@@ -86,7 +86,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_tlb_flush TEST_GEN_PROGS_x86_64 += x86_64/hyperv_tlb_flush
TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
...@@ -226,7 +225,7 @@ else ...@@ -226,7 +225,7 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD\ -Wno-gnu-variable-sized-type-not-at-end -MD -MP \
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-builtin-strnlen \ -fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
......
...@@ -267,6 +267,13 @@ static inline bool kvm_has_cap(long cap) ...@@ -267,6 +267,13 @@ static inline bool kvm_has_cap(long cap)
#define __KVM_SYSCALL_ERROR(_name, _ret) \ #define __KVM_SYSCALL_ERROR(_name, _ret) \
"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno) "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
/*
* Use the "inner", double-underscore macro when reporting errors from within
* other macros so that the name of ioctl() and not its literal numeric value
* is printed on error. The "outer" macro is strongly preferred when reporting
* errors "directly", i.e. without an additional layer of macros, as it reduces
* the probability of passing in the wrong string.
*/
#define __KVM_IOCTL_ERROR(_name, _ret) __KVM_SYSCALL_ERROR(_name, _ret) #define __KVM_IOCTL_ERROR(_name, _ret) __KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret) #define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
...@@ -279,17 +286,13 @@ static inline bool kvm_has_cap(long cap) ...@@ -279,17 +286,13 @@ static inline bool kvm_has_cap(long cap)
#define __kvm_ioctl(kvm_fd, cmd, arg) \ #define __kvm_ioctl(kvm_fd, cmd, arg) \
kvm_do_ioctl(kvm_fd, cmd, arg) kvm_do_ioctl(kvm_fd, cmd, arg)
#define kvm_ioctl(kvm_fd, cmd, arg) \
#define _kvm_ioctl(kvm_fd, cmd, name, arg) \
({ \ ({ \
int ret = __kvm_ioctl(kvm_fd, cmd, arg); \ int ret = __kvm_ioctl(kvm_fd, cmd, arg); \
\ \
TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); \ TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret)); \
}) })
#define kvm_ioctl(kvm_fd, cmd, arg) \
_kvm_ioctl(kvm_fd, cmd, #cmd, arg)
static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
#define __vm_ioctl(vm, cmd, arg) \ #define __vm_ioctl(vm, cmd, arg) \
...@@ -298,17 +301,42 @@ static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } ...@@ -298,17 +301,42 @@ static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
kvm_do_ioctl((vm)->fd, cmd, arg); \ kvm_do_ioctl((vm)->fd, cmd, arg); \
}) })
#define _vm_ioctl(vm, cmd, name, arg) \ /*
* Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
* the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
* probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
* selftests existed and (b) should never outright fail, i.e. is supposed to
* return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
* VM and its vCPUs, including KVM_CHECK_EXTENSION.
*/
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm) \
do { \
int __errno = errno; \
\
static_assert_is_vm(vm); \
\
if (cond) \
break; \
\
if (errno == EIO && \
__vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) { \
TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO"); \
TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues"); \
} \
errno = __errno; \
TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret)); \
} while (0)
#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm) \
__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
#define vm_ioctl(vm, cmd, arg) \
({ \ ({ \
int ret = __vm_ioctl(vm, cmd, arg); \ int ret = __vm_ioctl(vm, cmd, arg); \
\ \
TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); \ __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \
}) })
#define vm_ioctl(vm, cmd, arg) \
_vm_ioctl(vm, cmd, #cmd, arg)
static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { } static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
#define __vcpu_ioctl(vcpu, cmd, arg) \ #define __vcpu_ioctl(vcpu, cmd, arg) \
...@@ -317,16 +345,13 @@ static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { } ...@@ -317,16 +345,13 @@ static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
kvm_do_ioctl((vcpu)->fd, cmd, arg); \ kvm_do_ioctl((vcpu)->fd, cmd, arg); \
}) })
#define _vcpu_ioctl(vcpu, cmd, name, arg) \ #define vcpu_ioctl(vcpu, cmd, arg) \
({ \ ({ \
int ret = __vcpu_ioctl(vcpu, cmd, arg); \ int ret = __vcpu_ioctl(vcpu, cmd, arg); \
\ \
TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); \ __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \
}) })
#define vcpu_ioctl(vcpu, cmd, arg) \
_vcpu_ioctl(vcpu, cmd, #cmd, arg)
/* /*
* Looks up and returns the value corresponding to the capability * Looks up and returns the value corresponding to the capability
* (KVM_CAP_*) given by cap. * (KVM_CAP_*) given by cap.
...@@ -335,7 +360,7 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap) ...@@ -335,7 +360,7 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{ {
int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap); int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);
TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret)); TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
return ret; return ret;
} }
...@@ -442,7 +467,7 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm) ...@@ -442,7 +467,7 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
{ {
int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL); int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd)); TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
return fd; return fd;
} }
...@@ -684,7 +709,7 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu) ...@@ -684,7 +709,7 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{ {
int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL); int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);
TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd)); TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_CHECK_EXTENSION, fd, vcpu->vm);
return fd; return fd;
} }
......
...@@ -191,7 +191,7 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str) ...@@ -191,7 +191,7 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
} }
int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args); int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args);
int guest_snprintf(char *buf, int n, const char *fmt, ...); __printf(3, 4) int guest_snprintf(char *buf, int n, const char *fmt, ...);
char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1))); char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));
......
...@@ -34,9 +34,10 @@ void ucall_arch_do_ucall(vm_vaddr_t uc); ...@@ -34,9 +34,10 @@ void ucall_arch_do_ucall(vm_vaddr_t uc);
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu); void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
void ucall(uint64_t cmd, int nargs, ...); void ucall(uint64_t cmd, int nargs, ...);
void ucall_fmt(uint64_t cmd, const char *fmt, ...); __printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
void ucall_assert(uint64_t cmd, const char *exp, const char *file, __printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
unsigned int line, const char *fmt, ...); const char *file, unsigned int line,
const char *fmt, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa); void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
int ucall_nr_pages_required(uint64_t page_size); int ucall_nr_pages_required(uint64_t page_size);
......
...@@ -1271,7 +1271,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) ...@@ -1271,7 +1271,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
vcpu->vm = vm; vcpu->vm = vm;
vcpu->id = vcpu_id; vcpu->id = vcpu_id;
vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd)); TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
......
...@@ -157,17 +157,17 @@ static void guest_code_move_memory_region(void) ...@@ -157,17 +157,17 @@ static void guest_code_move_memory_region(void)
*/ */
val = guest_spin_on_val(0); val = guest_spin_on_val(0);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL, __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
"Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val); "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);
/* Spin until the misaligning memory region move completes. */ /* Spin until the misaligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL); val = guest_spin_on_val(MMIO_VAL);
__GUEST_ASSERT(val == 1 || val == 0, __GUEST_ASSERT(val == 1 || val == 0,
"Expected '0' or '1' (no MMIO), got '%llx'", val); "Expected '0' or '1' (no MMIO), got '%lx'", val);
/* Spin until the memory region starts to get re-aligned. */ /* Spin until the memory region starts to get re-aligned. */
val = guest_spin_on_val(0); val = guest_spin_on_val(0);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL, __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
"Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val); "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);
/* Spin until the re-aligning memory region move completes. */ /* Spin until the re-aligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL); val = guest_spin_on_val(MMIO_VAL);
......
...@@ -55,18 +55,18 @@ static void guest_msr(struct msr_data *msr) ...@@ -55,18 +55,18 @@ static void guest_msr(struct msr_data *msr)
if (msr->fault_expected) if (msr->fault_expected)
__GUEST_ASSERT(vector == GP_VECTOR, __GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on %sMSR(0x%x), got vector '0x%x'", "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
msr->idx, msr->write ? "WR" : "RD", vector); msr->write ? "WR" : "RD", msr->idx, vector);
else else
__GUEST_ASSERT(!vector, __GUEST_ASSERT(!vector,
"Expected success on %sMSR(0x%x), got vector '0x%x'", "Expected success on %sMSR(0x%x), got vector '0x%x'",
msr->idx, msr->write ? "WR" : "RD", vector); msr->write ? "WR" : "RD", msr->idx, vector);
if (vector || is_write_only_msr(msr->idx)) if (vector || is_write_only_msr(msr->idx))
goto done; goto done;
if (msr->write) if (msr->write)
__GUEST_ASSERT(!vector, __GUEST_ASSERT(!vector,
"WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'", "WRMSR(0x%x) to '0x%lx', RDMSR read '0x%lx'",
msr->idx, msr->write_val, msr_val); msr->idx, msr->write_val, msr_val);
/* Invariant TSC bit appears when TSC invariant control MSR is written to */ /* Invariant TSC bit appears when TSC invariant control MSR is written to */
...@@ -102,11 +102,11 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) ...@@ -102,11 +102,11 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
vector = __hyperv_hypercall(hcall->control, input, output, &res); vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) { if (hcall->ud_expected) {
__GUEST_ASSERT(vector == UD_VECTOR, __GUEST_ASSERT(vector == UD_VECTOR,
"Expected #UD for control '%u', got vector '0x%x'", "Expected #UD for control '%lu', got vector '0x%x'",
hcall->control, vector); hcall->control, vector);
} else { } else {
__GUEST_ASSERT(!vector, __GUEST_ASSERT(!vector,
"Expected no exception for control '%u', got vector '0x%x'", "Expected no exception for control '%lu', got vector '0x%x'",
hcall->control, vector); hcall->control, vector);
GUEST_ASSERT_EQ(res, hcall->expect); GUEST_ASSERT_EQ(res, hcall->expect);
} }
......
/*
* mmio_warning_test
*
* Copyright (C) 2019, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Test that we don't get a kernel warning when we call KVM_RUN after a
* triple fault occurs. To get the triple fault to occur we call KVM_RUN
* on a VCPU that hasn't been properly setup.
*
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <kvm_util.h>
#include <linux/kvm.h>
#include <processor.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <test_util.h>
#include <unistd.h>
#define NTHREAD 4	/* number of concurrent KVM_RUN threads per process */
#define NPROCESS 5	/* number of child processes that each run test() */
/* Per-thread state handed to thr(): the vCPU fd and its mmap'd kvm_run. */
struct thread_context {
	int kvmcpu;		/* vCPU file descriptor, shared by all threads */
	struct kvm_run *run;	/* kvm_run area mmap'd from kvmcpu in test() */
};
void *thr(void *arg)
{
struct thread_context *tc = (struct thread_context *)arg;
int res;
int kvmcpu = tc->kvmcpu;
struct kvm_run *run = tc->run;
res = ioctl(kvmcpu, KVM_RUN, 0);
pr_info("ret1=%d exit_reason=%d suberror=%d\n",
res, run->exit_reason, run->internal.suberror);
return 0;
}
/*
 * Create a bare VM with one completely unconfigured vCPU, then hammer that
 * vCPU with KVM_RUN from NTHREAD concurrent threads at randomized offsets.
 * The goal is to provoke (and prove the absence of) kernel WARNs on the
 * resulting triple faults; main() checks dmesg before and after.
 */
void test(void)
{
	int i, kvm, kvmvm, kvmcpu;
	pthread_t th[NTHREAD];
	struct kvm_run *run;
	struct thread_context tc;

	kvm = open("/dev/kvm", O_RDWR);
	TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
	kvmvm = __kvm_ioctl(kvm, KVM_CREATE_VM, NULL);
	TEST_ASSERT(kvmvm > 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, kvmvm));
	kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(kvmcpu != -1, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, kvmcpu));

	/*
	 * The original code never checked mmap(); on failure thr() would have
	 * dereferenced MAP_FAILED.  4096 bytes is assumed to cover kvm_run --
	 * NOTE(review): vcpu_mmap_sz() would be the canonical size, confirm.
	 */
	run = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, kvmcpu, 0);
	TEST_ASSERT(run != MAP_FAILED, "mmap() of the vCPU's kvm_run failed");

	tc.kvmcpu = kvmcpu;
	tc.run = run;
	srand(getpid());
	for (i = 0; i < NTHREAD; i++) {
		TEST_ASSERT(!pthread_create(&th[i], NULL, thr, &tc),
			    "pthread_create() failed for thread %d", i);
		usleep(rand() % 10000);
	}
	for (i = 0; i < NTHREAD; i++)
		pthread_join(th[i], NULL);

	/* Tidy up; callers fork+exit, but don't rely on that. */
	munmap(run, 4096);
	close(kvmcpu);
	close(kvmvm);
	close(kvm);
}
/*
 * Count "WARNING:" lines currently in dmesg.  Returns 0 when dmesg is
 * unreadable or the pipeline's output can't be parsed; callers only compare
 * before/after deltas, so "no data" and "no warnings" are equivalent here.
 */
int get_warnings_count(void)
{
	int warnings = 0;
	FILE *f;

	f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
	/* Original code dereferenced f unconditionally; popen() can fail. */
	if (!f)
		return 0;
	if (fscanf(f, "%d", &warnings) < 1)
		warnings = 0;
	pclose(f);

	return warnings;
}
/*
 * Run test() in NPROCESS sequential child processes and assert that the
 * kernel logged no new WARNINGs.  Requires an Intel host without
 * unrestricted-guest support, since only that path used to trip the WARN.
 */
int main(void)
{
	int warnings_before, warnings_after;

	TEST_REQUIRE(host_cpu_is_intel);
	TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));

	warnings_before = get_warnings_count();

	for (int i = 0; i < NPROCESS; ++i) {
		int status;
		int pid = fork();

		/* Original code silently exit(1)'d on fork() failure. */
		TEST_ASSERT(pid >= 0, "fork() failed, errno: %i (%s)",
			    errno, strerror(errno));
		if (pid == 0) {
			test();
			exit(0);
		}
		while (waitpid(pid, &status, __WALL) != pid)
			;
		/*
		 * Surface child failures; the original collected the status
		 * but never looked at it, hiding crashed children.
		 */
		TEST_ASSERT(WIFEXITED(status) && !WEXITSTATUS(status),
			    "child %d did not exit cleanly, status: 0x%x",
			    pid, status);
	}

	warnings_after = get_warnings_count();
	TEST_ASSERT(warnings_before == warnings_after,
		    "Warnings found in kernel. Run 'dmesg' to inspect them.");

	return 0;
}
...@@ -27,10 +27,12 @@ do { \ ...@@ -27,10 +27,12 @@ do { \
\ \
if (fault_wanted) \ if (fault_wanted) \
__GUEST_ASSERT((vector) == UD_VECTOR, \ __GUEST_ASSERT((vector) == UD_VECTOR, \
"Expected #UD on " insn " for testcase '0x%x', got '0x%x'", vector); \ "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", \
testcase, vector); \
else \ else \
__GUEST_ASSERT(!(vector), \ __GUEST_ASSERT(!(vector), \
"Expected success on " insn " for testcase '0x%x', got '0x%x'", vector); \ "Expected success on " insn " for testcase '0x%x', got '0x%x'", \
testcase, vector); \
} while (0) } while (0)
static void guest_monitor_wait(int testcase) static void guest_monitor_wait(int testcase)
......
...@@ -259,7 +259,7 @@ int main(int argc, char **argv) ...@@ -259,7 +259,7 @@ int main(int argc, char **argv)
__TEST_REQUIRE(token == MAGIC_TOKEN, __TEST_REQUIRE(token == MAGIC_TOKEN,
"This test must be run with the magic token %d.\n" "This test must be run with the magic token %d.\n"
"This is done by nx_huge_pages_test.sh, which\n" "This is done by nx_huge_pages_test.sh, which\n"
"also handles environment setup for the test."); "also handles environment setup for the test.", MAGIC_TOKEN);
run_test(reclaim_period_ms, false, reboot_permissions); run_test(reclaim_period_ms, false, reboot_permissions);
run_test(reclaim_period_ms, true, reboot_permissions); run_test(reclaim_period_ms, true, reboot_permissions);
......
...@@ -35,7 +35,7 @@ do { \ ...@@ -35,7 +35,7 @@ do { \
\ \
for (i = 0; i < size; i++) \ for (i = 0; i < size; i++) \
__GUEST_ASSERT(mem[i] == pattern, \ __GUEST_ASSERT(mem[i] == pattern, \
"Guest expected 0x%x at offset %lu (gpa 0x%llx), got 0x%x", \ "Guest expected 0x%x at offset %lu (gpa 0x%lx), got 0x%x", \
pattern, i, gpa + i, mem[i]); \ pattern, i, gpa + i, mem[i]); \
} while (0) } while (0)
......
...@@ -103,7 +103,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i ...@@ -103,7 +103,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
run_guest(vmcb, svm->vmcb_gpa); run_guest(vmcb, svm->vmcb_gpa);
__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL, __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
"Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'", "Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%lx, info2 = '0x%lx'",
vmcb->control.exit_code, vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2); vmcb->control.exit_info_1, vmcb->control.exit_info_2);
...@@ -133,7 +133,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i ...@@ -133,7 +133,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
run_guest(vmcb, svm->vmcb_gpa); run_guest(vmcb, svm->vmcb_gpa);
__GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT, __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
"Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'", "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%lx, info2 = '0x%lx'",
vmcb->control.exit_code, vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2); vmcb->control.exit_info_1, vmcb->control.exit_info_2);
......
...@@ -56,7 +56,7 @@ static void guest_test_perf_capabilities_gp(uint64_t val) ...@@ -56,7 +56,7 @@ static void guest_test_perf_capabilities_gp(uint64_t val)
uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
__GUEST_ASSERT(vector == GP_VECTOR, __GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP for value '0x%llx', got vector '0x%x'", "Expected #GP for value '0x%lx', got vector '0x%x'",
val, vector); val, vector);
} }
......
...@@ -25,7 +25,7 @@ do { \ ...@@ -25,7 +25,7 @@ do { \
\ \
__GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \ __GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \
__supported == ((xfeatures) | (dependencies)), \ __supported == ((xfeatures) | (dependencies)), \
"supported = 0x%llx, xfeatures = 0x%llx, dependencies = 0x%llx", \ "supported = 0x%lx, xfeatures = 0x%llx, dependencies = 0x%llx", \
__supported, (xfeatures), (dependencies)); \ __supported, (xfeatures), (dependencies)); \
} while (0) } while (0)
...@@ -42,7 +42,7 @@ do { \ ...@@ -42,7 +42,7 @@ do { \
uint64_t __supported = (supported_xcr0) & (xfeatures); \ uint64_t __supported = (supported_xcr0) & (xfeatures); \
\ \
__GUEST_ASSERT(!__supported || __supported == (xfeatures), \ __GUEST_ASSERT(!__supported || __supported == (xfeatures), \
"supported = 0x%llx, xfeatures = 0x%llx", \ "supported = 0x%lx, xfeatures = 0x%llx", \
__supported, (xfeatures)); \ __supported, (xfeatures)); \
} while (0) } while (0)
...@@ -81,7 +81,7 @@ static void guest_code(void) ...@@ -81,7 +81,7 @@ static void guest_code(void)
vector = xsetbv_safe(0, supported_xcr0); vector = xsetbv_safe(0, supported_xcr0);
__GUEST_ASSERT(!vector, __GUEST_ASSERT(!vector,
"Expected success on XSETBV(0x%llx), got vector '0x%x'", "Expected success on XSETBV(0x%lx), got vector '0x%x'",
supported_xcr0, vector); supported_xcr0, vector);
for (i = 0; i < 64; i++) { for (i = 0; i < 64; i++) {
...@@ -90,7 +90,7 @@ static void guest_code(void) ...@@ -90,7 +90,7 @@ static void guest_code(void)
vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i)); vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
__GUEST_ASSERT(vector == GP_VECTOR, __GUEST_ASSERT(vector == GP_VECTOR,
"Expected #GP on XSETBV(0x%llx), supported XCR0 = %llx, got vector '0x%x'", "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got vector '0x%x'",
BIT_ULL(i), supported_xcr0, vector); BIT_ULL(i), supported_xcr0, vector);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment