Commit 8fcee042 authored by Sean Christopherson

KVM: selftests: Restore assert for non-nested VMs in access tracking test

Restore the assert (on x86-64) that <10% of pages are still idle when NOT
running as a nested VM in the access tracking test.  The original assert
was converted to a "warning" to avoid false failures when running the
test in a VM, but the non-nested case does not suffer from the same
"infinite TLB size" issue.

Using the HYPERVISOR flag isn't infallible as VMMs aren't strictly
required to enumerate the "feature" in CPUID, but practically speaking
anyone that is running KVM selftests in VMs is going to be using a VMM
and hypervisor that sets the HYPERVISOR flag.
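As background (a minimal standalone sketch, not part of this patch): the HYPERVISOR flag is CPUID.01H:ECX bit 31, which the selftests query via this_cpu_has(X86_FEATURE_HYPERVISOR) after this change. Outside the selftest framework, the same bit can be read with the GCC/Clang <cpuid.h> helper; the helper name below is illustrative, not from the commit.

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative helper: returns true if CPUID.01H:ECX[31] (HYPERVISOR) is set. */
static bool cpu_has_hypervisor_flag(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if leaf 0x1 is not supported. */
	if (!__get_cpuid(0x1, &eax, &ebx, &ecx, &edx))
		return false;

	return ecx & (1u << 31);
}

int main(void)
{
	printf("HYPERVISOR flag is %s\n",
	       cpu_has_hypervisor_flag() ? "set (likely running in a VM)" : "clear");
	return 0;
}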

Cc: David Matlack <dmatlack@google.com>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221129175300.4052283-3-seanjc@google.com
parent a33004e8
@@ -46,6 +46,7 @@
 #include "test_util.h"
 #include "memstress.h"
 #include "guest_modes.h"
+#include "processor.h"

 /* Global variable used to synchronize all of the vCPU threads. */
 static int iteration;
@@ -180,15 +181,21 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
	 * access tracking but low enough as to not make the test too brittle
	 * over time and across architectures.
	 *
-	 * Note that when run in nested virtualization, this check will trigger
-	 * much more frequently because TLB size is unlimited and since no flush
-	 * happens, much more pages are cached there and guest won't see the
-	 * "idle" bit cleared.
+	 * When running the guest as a nested VM, "warn" instead of asserting
+	 * as the TLB size is effectively unlimited and the KVM doesn't
+	 * explicitly flush the TLB when aging SPTEs.  As a result, more pages
+	 * are cached and the guest won't see the "idle" bit cleared.
	 */
-	if (still_idle >= pages / 10)
+	if (still_idle >= pages / 10) {
+#ifdef __x86_64__
+		TEST_ASSERT(this_cpu_has(X86_FEATURE_HYPERVISOR),
+			    "vCPU%d: Too many pages still idle (%lu out of %lu)",
+			    vcpu_idx, still_idle, pages);
+#endif
		printf("WARNING: vCPU%d: Too many pages still idle (%lu out of %lu), "
		       "this will affect performance results.\n",
		       vcpu_idx, still_idle, pages);
+	}

	close(page_idle_fd);
	close(pagemap_fd);
@@ -94,6 +94,7 @@ struct kvm_x86_cpu_feature {
 #define X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
 #define X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
 #define X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
+#define X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
 #define X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
 #define X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
 #define X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)