Commit 6d85f51a authored by Thomas Huth, committed by Sean Christopherson

KVM: selftests: Rename the ASSERT_EQ macro

tools/testing/selftests/kselftest_harness.h already defines an ASSERT_EQ
macro, so KVM selftests currently cannot include test_util.h together
with that header. Rename the macro in the KVM selftests to
TEST_ASSERT_EQ to avoid the clash; the new name is also more consistent
with the other TEST_* macros in test_util.h.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Link: https://lore.kernel.org/r/20230712075910.22480-2-thuth@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 7e4966e6
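
For illustration only (not part of the patch), a minimal sketch of the clash this rename avoids; the include paths are abbreviated and the snippet is hypothetical:

    #include "kselftest_harness.h"  /* defines ASSERT_EQ(expected, seen) */
    #include "test_util.h"          /* before this patch: also defined ASSERT_EQ(a, b) */

    /*
     * The two definitions differ, so the compiler emits a macro
     * redefinition warning (an error under -Werror), and whichever
     * header is included last silently wins for all subsequent uses.
     * Renaming the KVM selftests macro to TEST_ASSERT_EQ removes the
     * collision.
     */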
@@ -98,7 +98,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
         uint64_t val;
 
         vcpu_get_reg(vcpu, reg_id, &val);
-        ASSERT_EQ(val, 0);
+        TEST_ASSERT_EQ(val, 0);
 
         /*
          * Expect the ioctl to succeed with no effect on the register
@@ -107,7 +107,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
         vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
 
         vcpu_get_reg(vcpu, reg_id, &val);
-        ASSERT_EQ(val, 0);
+        TEST_ASSERT_EQ(val, 0);
     }
 }
@@ -127,14 +127,14 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
         uint64_t val;
 
         vcpu_get_reg(vcpu, reg_id, &val);
-        ASSERT_EQ(val, 0);
+        TEST_ASSERT_EQ(val, 0);
 
         r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
         TEST_ASSERT(r < 0 && errno == EINVAL,
                     "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 
         vcpu_get_reg(vcpu, reg_id, &val);
-        ASSERT_EQ(val, 0);
+        TEST_ASSERT_EQ(val, 0);
     }
 }
...
@@ -318,7 +318,7 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
     TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
                 "The only expected UFFD mode is MISSING");
-    ASSERT_EQ(addr, (uint64_t)args->hva);
+    TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
 
     pr_debug("uffd fault: addr=%p write=%d\n",
              (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
@@ -432,7 +432,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
     region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
     hva = (void *)region->region.userspace_addr;
 
-    ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
+    TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
 
     memcpy(hva, run->mmio.data, run->mmio.len);
     events.mmio_exits += 1;
@@ -631,9 +631,9 @@ static void setup_default_handlers(struct test_desc *test)
 static void check_event_counts(struct test_desc *test)
 {
-    ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
-    ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
-    ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
+    TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
+    TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
+    TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
 }
 
 static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
...
@@ -53,11 +53,11 @@ void test_assert(bool exp, const char *exp_str,
 #define TEST_ASSERT(e, fmt, ...) \
     test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
 
-#define ASSERT_EQ(a, b) do { \
+#define TEST_ASSERT_EQ(a, b) do { \
     typeof(a) __a = (a); \
     typeof(b) __b = (b); \
     TEST_ASSERT(__a == __b, \
-                "ASSERT_EQ(%s, %s) failed.\n" \
+                "TEST_ASSERT_EQ(%s, %s) failed.\n" \
                 "\t%s is %#lx\n" \
                 "\t%s is %#lx", \
                 #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
...
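
A minimal usage sketch of the renamed macro (variable names and values hypothetical): because the arguments are captured once in typeof() locals, each side is evaluated exactly once, so side-effecting expressions are safe, and a mismatch prints both stringified expressions with their values.

    uint64_t expected = 0x1000;
    uint64_t actual = read_guest_counter();  /* hypothetical helper */

    TEST_ASSERT_EQ(actual, expected);
    /*
     * If actual were 0x1200, test_assert() would report:
     *   TEST_ASSERT_EQ(actual, expected) failed.
     *       actual is 0x1200
     *       expected is 0x1000
     */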
@@ -994,7 +994,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
     if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
         alignment = max(backing_src_pagesz, alignment);
 
-    ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+    TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
 
     /* Add enough memory to align up if necessary */
     if (alignment > 1)
...
@@ -55,7 +55,7 @@ static void rendezvous_with_boss(void)
 static void run_vcpu(struct kvm_vcpu *vcpu)
 {
     vcpu_run(vcpu);
-    ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+    TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 }
 
 static void *vcpu_worker(void *data)
...
@@ -237,8 +237,8 @@ static void test_get_cmma_basic(void)
     /* GET_CMMA_BITS without CMMA enabled should fail */
     rc = vm_get_cmma_bits(vm, 0, &errno_out);
-    ASSERT_EQ(rc, -1);
-    ASSERT_EQ(errno_out, ENXIO);
+    TEST_ASSERT_EQ(rc, -1);
+    TEST_ASSERT_EQ(errno_out, ENXIO);
 
     enable_cmma(vm);
     vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
@@ -247,31 +247,31 @@ static void test_get_cmma_basic(void)
     /* GET_CMMA_BITS without migration mode and without peeking should fail */
     rc = vm_get_cmma_bits(vm, 0, &errno_out);
-    ASSERT_EQ(rc, -1);
-    ASSERT_EQ(errno_out, EINVAL);
+    TEST_ASSERT_EQ(rc, -1);
+    TEST_ASSERT_EQ(errno_out, EINVAL);
 
     /* GET_CMMA_BITS without migration mode and with peeking should work */
     rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
-    ASSERT_EQ(rc, 0);
-    ASSERT_EQ(errno_out, 0);
+    TEST_ASSERT_EQ(rc, 0);
+    TEST_ASSERT_EQ(errno_out, 0);
 
     enable_dirty_tracking(vm);
     enable_migration_mode(vm);
 
     /* GET_CMMA_BITS with invalid flags */
     rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
-    ASSERT_EQ(rc, -1);
-    ASSERT_EQ(errno_out, EINVAL);
+    TEST_ASSERT_EQ(rc, -1);
+    TEST_ASSERT_EQ(errno_out, EINVAL);
 
     kvm_vm_free(vm);
 }
 
 static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
 {
-    ASSERT_EQ(vcpu->run->exit_reason, 13);
-    ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
-    ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
-    ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
+    TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);
+    TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
+    TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
+    TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
 }
 
 static void test_migration_mode(void)
@@ -283,8 +283,8 @@ static void test_migration_mode(void)
     /* enabling migration mode on a VM without memory should fail */
     rc = __enable_migration_mode(vm);
-    ASSERT_EQ(rc, -1);
-    ASSERT_EQ(errno, EINVAL);
+    TEST_ASSERT_EQ(rc, -1);
+    TEST_ASSERT_EQ(errno, EINVAL);
     TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
     errno = 0;
@@ -304,8 +304,8 @@ static void test_migration_mode(void)
     /* migration mode when memslots have dirty tracking off should fail */
     rc = __enable_migration_mode(vm);
-    ASSERT_EQ(rc, -1);
-    ASSERT_EQ(errno, EINVAL);
+    TEST_ASSERT_EQ(rc, -1);
+    TEST_ASSERT_EQ(errno, EINVAL);
     TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
     errno = 0;
@@ -314,7 +314,7 @@ static void test_migration_mode(void)
     /* enabling migration mode should work now */
     rc = __enable_migration_mode(vm);
-    ASSERT_EQ(rc, 0);
+    TEST_ASSERT_EQ(rc, 0);
     TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
     errno = 0;
@@ -350,7 +350,7 @@ static void test_migration_mode(void)
      */
     vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
     rc = __enable_migration_mode(vm);
-    ASSERT_EQ(rc, 0);
+    TEST_ASSERT_EQ(rc, 0);
     TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
     errno = 0;
@@ -394,9 +394,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
     };
     memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
     vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
-    ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
-    ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
-    ASSERT_EQ(args.start_gfn, 0);
+    TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
+    TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
+    TEST_ASSERT_EQ(args.start_gfn, 0);
 
     /* ...and then - after a hole - the TEST_DATA memslot should follow */
     args = (struct kvm_s390_cmma_log){
@@ -407,9 +407,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
     };
     memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
     vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
-    ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
-    ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
-    ASSERT_EQ(args.remaining, 0);
+    TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
+    TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
+    TEST_ASSERT_EQ(args.remaining, 0);
 
     /* ...and nothing else should be there */
     args = (struct kvm_s390_cmma_log){
@@ -420,9 +420,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
     };
     memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
     vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
-    ASSERT_EQ(args.count, 0);
-    ASSERT_EQ(args.start_gfn, 0);
-    ASSERT_EQ(args.remaining, 0);
+    TEST_ASSERT_EQ(args.count, 0);
+    TEST_ASSERT_EQ(args.start_gfn, 0);
+    TEST_ASSERT_EQ(args.remaining, 0);
 }
 
 /**
@@ -498,11 +498,11 @@ static void assert_cmma_dirty(u64 first_dirty_gfn,
                               u64 dirty_gfn_count,
                               const struct kvm_s390_cmma_log *res)
 {
-    ASSERT_EQ(res->start_gfn, first_dirty_gfn);
-    ASSERT_EQ(res->count, dirty_gfn_count);
+    TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
+    TEST_ASSERT_EQ(res->count, dirty_gfn_count);
     for (size_t i = 0; i < dirty_gfn_count; i++)
-        ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */
-    ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
+        TEST_ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */
+    TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
 }
 
 static void test_get_skip_holes(void)
...
@@ -281,8 +281,8 @@ enum stage {
     if (uc.cmd == UCALL_ABORT) { \
         REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
     } \
-    ASSERT_EQ(uc.cmd, UCALL_SYNC); \
-    ASSERT_EQ(uc.args[1], __stage); \
+    TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+    TEST_ASSERT_EQ(uc.args[1], __stage); \
 }) \
 
 static void prepare_mem12(void)
@@ -808,7 +808,7 @@ static void test_termination(void)
     HOST_SYNC(t.vcpu, STAGE_IDLED);
     MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
     /* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
-    ASSERT_EQ(teid & teid_mask, 0);
+    TEST_ASSERT_EQ(teid & teid_mask, 0);
 
     kvm_vm_free(t.kvm_vm);
 }
...
@@ -191,8 +191,8 @@ static void guest_code(void)
     get_ucall(__vcpu, &uc); \
     if (uc.cmd == UCALL_ABORT) \
         REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
-    ASSERT_EQ(uc.cmd, UCALL_SYNC); \
-    ASSERT_EQ(uc.args[1], __stage); \
+    TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+    TEST_ASSERT_EQ(uc.args[1], __stage); \
 })
 
 #define HOST_SYNC(vcpu, stage) \
...
@@ -72,7 +72,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
         vcpu_run(vcpu);
-        ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+        TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
 
         vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
@@ -179,12 +179,12 @@ static void run_test(enum vm_guest_mode mode, void *unused)
      * with that capability.
      */
     if (dirty_log_manual_caps) {
-        ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
-        ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
-        ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
+        TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
+        TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+        TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
     } else {
-        ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
-        ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+        TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
+        TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
     }
 
     /*
@@ -192,9 +192,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
      * memory again, the page counts should be the same as they were
      * right after initial population of memory.
      */
-    ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
-    ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
-    ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
+    TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
+    TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
+    TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
 }
 
 static void help(char *name)
...
@@ -35,7 +35,7 @@ int main(int argc, char *argv[])
     vcpu_run(vcpu);
     handle_flds_emulation_failure_exit(vcpu);
     vcpu_run(vcpu);
-    ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+    TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 
     kvm_vm_free(vm);
     return 0;
...
@@ -247,12 +247,12 @@ int main(int argc, char *argv[])
     /* Verify the pending events comes back out the same as it went in. */
     vcpu_events_get(vcpu, &events);
-    ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
-              KVM_VCPUEVENT_VALID_PAYLOAD);
-    ASSERT_EQ(events.exception.pending, true);
-    ASSERT_EQ(events.exception.nr, SS_VECTOR);
-    ASSERT_EQ(events.exception.has_error_code, true);
-    ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
+    TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
+                   KVM_VCPUEVENT_VALID_PAYLOAD);
+    TEST_ASSERT_EQ(events.exception.pending, true);
+    TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
+    TEST_ASSERT_EQ(events.exception.has_error_code, true);
+    TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
 
     /*
      * Run for real with the pending #SS, L1 should get a VM-Exit due to
...
@@ -57,7 +57,7 @@ int main(void)
     for (i = 0; i < KVM_MAX_VCPUS; i++)
         vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC);
 
-    ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
+    TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
 
     vcpuN = vcpus[KVM_MAX_VCPUS - 1];
     for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
@@ -65,8 +65,8 @@ int main(void)
         vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED);
     }
 
-    ASSERT_EQ(pthread_cancel(thread), 0);
-    ASSERT_EQ(pthread_join(thread, NULL), 0);
+    TEST_ASSERT_EQ(pthread_cancel(thread), 0);
+    TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
 
     kvm_vm_free(vm);
...
@@ -176,7 +176,7 @@ static void race_sync_regs(void *racer)
             !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
             !!(run->s.regs.sregs.efer & EFER_LME));
 
-    ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);
+    TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);
 
     for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
         __vcpu_run(vcpu);
@@ -187,8 +187,8 @@ static void race_sync_regs(void *racer)
         }
     }
 
-    ASSERT_EQ(pthread_cancel(thread), 0);
-    ASSERT_EQ(pthread_join(thread, NULL), 0);
+    TEST_ASSERT_EQ(pthread_cancel(thread), 0);
+    TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
 
     kvm_vm_free(vm);
 }
...
@@ -103,39 +103,39 @@ int main(void)
     vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
     val = 0;
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
 
     /* Guest: writes to MSR_IA32_TSC affect both MSRs. */
     run_vcpu(vcpu, 1);
     val = 1ull * GUEST_STEP;
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
 
     /* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
     run_vcpu(vcpu, 2);
     val = 2ull * GUEST_STEP;
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
 
     /*
      * Host: writes to MSR_IA32_TSC set the host-side offset
      * and therefore do not change MSR_IA32_TSC_ADJUST.
     */
     vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
     run_vcpu(vcpu, 3);
 
     /* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
     vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
-    ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+    TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
 
     /* Restore previous value. */
     vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
 
     /*
      * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
@@ -143,8 +143,8 @@ int main(void)
      */
     run_vcpu(vcpu, 4);
     val = 3ull * GUEST_STEP;
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
 
     /*
      * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
@@ -152,8 +152,8 @@ int main(void)
      */
     run_vcpu(vcpu, 5);
     val = 4ull * GUEST_STEP;
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
-    ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+    TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
 
     kvm_vm_free(vm);
...
@@ -50,7 +50,7 @@ static void set_timer(void)
     timer.it_value.tv_sec = 0;
     timer.it_value.tv_usec = 200;
     timer.it_interval = timer.it_value;
-    ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
+    TEST_ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
 }
 
 static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
...
@@ -103,7 +103,8 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
         TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
     }
 
-    ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities);
+    TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES),
+                   host_cap.capabilities);
 
     vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
...
@@ -65,17 +65,17 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
     vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
 
     vcpu_run(vcpu);
-    ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
-    ASSERT_EQ(uc.args[1], val);
+    TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+    TEST_ASSERT_EQ(uc.args[1], val);
 
     vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
     icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
           (u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
     if (!x->is_x2apic) {
         val &= (-1u | (0xffull << (32 + 24)));
-        ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
+        TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
     } else {
-        ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+        TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
     }
 }
...
@@ -108,16 +108,16 @@ int main(int argc, char *argv[])
         vcpu_run(vcpu);
 
         if (run->exit_reason == KVM_EXIT_XEN) {
-            ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
-            ASSERT_EQ(run->xen.u.hcall.cpl, 0);
-            ASSERT_EQ(run->xen.u.hcall.longmode, 1);
-            ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
-            ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
-            ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
-            ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
-            ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
-            ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
-            ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
+            TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
+            TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0);
+            TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1);
+            TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
+            TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
+            TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
+            TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
+            TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
+            TEST_ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
+            TEST_ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
             run->xen.u.hcall.result = RETVALUE;
             continue;
         }
...