Commit e6faa049 authored by Jinrong Liang, committed by Sean Christopherson

KVM: selftests: Add pmu.h and lib/pmu.c for common PMU assets

Add a PMU library for x86 selftests to help eliminate open-coded event
encodings, and to reduce the amount of copy+paste between PMU selftests.

Use the new common macro definitions in the existing PMU event filter test.

Cc: Aaron Lewis <aaronlewis@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20240109230250.424295-16-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 370d5363
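
For orientation, here is a minimal sketch (not part of the commit) of how a selftest's guest code might use the new pmu.h definitions to program a general-purpose counter. The function name and workload are hypothetical; wrmsr()/rdmsr(), the MSR indices, and GUEST_SYNC() come from the existing selftest headers, and the sequence mirrors the updated intel_guest_code() below.

#include "pmu.h"
#include "processor.h"

/* Hypothetical guest code: count retired instructions on GP counter 0. */
static void guest_count_instructions(void)
{
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
			       ARCH_PERFMON_EVENTSEL_OS |
			       INTEL_ARCH_INSTRUCTIONS_RETIRED);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(0));	/* enable only PMC0 */

	/* ... run the workload being measured ... */

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	GUEST_SYNC(rdmsr(MSR_IA32_PMC0));		/* report the count */
}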

tools/testing/selftests/kvm/Makefile
@@ -36,6 +36,7 @@ LIBKVM_x86_64 += lib/x86_64/apic.c
 LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/hyperv.c
 LIBKVM_x86_64 += lib/x86_64/memstress.c
+LIBKVM_x86_64 += lib/x86_64/pmu.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c

tools/testing/selftests/kvm/include/x86_64/pmu.h (new file)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2023, Tencent, Inc.
*/
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H

#include <stdint.h>

#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
/*
* Encode an eventsel+umask pair into event-select MSR format. Note, this is
* technically AMD's format, as Intel's format only supports 8 bits for the
* event selector, i.e. doesn't use bits 24:16 for the selector. But, OR-ing
* in '0' is a nop and won't clobber the CMASK.
*/
#define RAW_EVENT(eventsel, umask) (((eventsel & 0xf00UL) << 24) | \
				    ((eventsel) & 0xff) | \
				    ((umask) & 0xff) << 8)
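/*
 * For example, RAW_EVENT(0x2e, 0x4f) encodes to 0x4f2e (eventsel in bits 7:0,
 * umask in bits 15:8), and RAW_EVENT(0x1c2, 0) encodes to 0x1000000c2, i.e.
 * bits 11:8 of the event selector land in bits 35:32 of the MSR value.
 */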
/*
* These are technically Intel's definitions, but except for CMASK (see above),
* AMD's layout is compatible with Intel's.
*/
#define ARCH_PERFMON_EVENTSEL_EVENT GENMASK_ULL(7, 0)
#define ARCH_PERFMON_EVENTSEL_UMASK GENMASK_ULL(15, 8)
#define ARCH_PERFMON_EVENTSEL_USR BIT_ULL(16)
#define ARCH_PERFMON_EVENTSEL_OS BIT_ULL(17)
#define ARCH_PERFMON_EVENTSEL_EDGE BIT_ULL(18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL BIT_ULL(19)
#define ARCH_PERFMON_EVENTSEL_INT BIT_ULL(20)
#define ARCH_PERFMON_EVENTSEL_ANY BIT_ULL(21)
#define ARCH_PERFMON_EVENTSEL_ENABLE BIT_ULL(22)
#define ARCH_PERFMON_EVENTSEL_INV BIT_ULL(23)
#define ARCH_PERFMON_EVENTSEL_CMASK GENMASK_ULL(31, 24)
/* RDPMC control flags, Intel only. */
#define INTEL_RDPMC_METRICS BIT_ULL(29)
#define INTEL_RDPMC_FIXED BIT_ULL(30)
#define INTEL_RDPMC_FAST BIT_ULL(31)
/* Fixed PMC controls, Intel only. */
#define FIXED_PMC_GLOBAL_CTRL_ENABLE(_idx) BIT_ULL((32 + (_idx)))
#define FIXED_PMC_KERNEL BIT_ULL(0)
#define FIXED_PMC_USER BIT_ULL(1)
#define FIXED_PMC_ANYTHREAD BIT_ULL(2)
#define FIXED_PMC_ENABLE_PMI BIT_ULL(3)
#define FIXED_PMC_NR_BITS 4
#define FIXED_PMC_CTRL(_idx, _val) ((_val) << ((_idx) * FIXED_PMC_NR_BITS))
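/*
 * For example, enabling fixed counter 1 to count in CPL0 (kernel) only:
 * FIXED_PMC_CTRL(1, FIXED_PMC_KERNEL) == 0x10 for MSR_CORE_PERF_FIXED_CTR_CTRL,
 * paired with FIXED_PMC_GLOBAL_CTRL_ENABLE(1) == BIT_ULL(33) in
 * MSR_CORE_PERF_GLOBAL_CTRL.
 */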
#define PMU_CAP_FW_WRITES BIT_ULL(13)
#define PMU_CAP_LBR_FMT 0x3f

#define INTEL_ARCH_CPU_CYCLES RAW_EVENT(0x3c, 0x00)
#define INTEL_ARCH_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
#define INTEL_ARCH_REFERENCE_CYCLES RAW_EVENT(0x3c, 0x01)
#define INTEL_ARCH_LLC_REFERENCES RAW_EVENT(0x2e, 0x4f)
#define INTEL_ARCH_LLC_MISSES RAW_EVENT(0x2e, 0x41)
#define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01)

#define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
#define AMD_ZEN_BRANCHES_RETIRED RAW_EVENT(0xc2, 0x00)
#define AMD_ZEN_BRANCHES_MISPREDICTED RAW_EVENT(0xc3, 0x00)
/*
* Note! The order and thus the index of the architectural events matters as
* support for each event is enumerated via CPUID using the index of the event.
*/
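/*
 * E.g. on Intel, CPUID.0xA.EBX is a bit vector in which a set bit N means the
 * architectural event with index N is NOT available.
 */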
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES_INDEX,
	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX,
	INTEL_ARCH_REFERENCE_CYCLES_INDEX,
	INTEL_ARCH_LLC_REFERENCES_INDEX,
	INTEL_ARCH_LLC_MISSES_INDEX,
	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
	NR_INTEL_ARCH_EVENTS,
};

enum amd_pmu_zen_events {
	AMD_ZEN_CORE_CYCLES_INDEX,
	AMD_ZEN_INSTRUCTIONS_INDEX,
	AMD_ZEN_BRANCHES_INDEX,
	AMD_ZEN_BRANCH_MISSES_INDEX,
	NR_AMD_ZEN_EVENTS,
};
extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];
#endif /* SELFTEST_KVM_PMU_H */

tools/testing/selftests/kvm/lib/x86_64/pmu.c (new file)

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023, Tencent, Inc.
*/
#include <stdint.h>
#include <linux/kernel.h>
#include "kvm_util.h"
#include "pmu.h"
const uint64_t intel_pmu_arch_events[] = {
	INTEL_ARCH_CPU_CYCLES,
	INTEL_ARCH_INSTRUCTIONS_RETIRED,
	INTEL_ARCH_REFERENCE_CYCLES,
	INTEL_ARCH_LLC_REFERENCES,
	INTEL_ARCH_LLC_MISSES,
	INTEL_ARCH_BRANCHES_RETIRED,
	INTEL_ARCH_BRANCHES_MISPREDICTED,
	INTEL_ARCH_TOPDOWN_SLOTS,
};
kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);

const uint64_t amd_pmu_zen_events[] = {
	AMD_ZEN_CORE_CYCLES,
	AMD_ZEN_INSTRUCTIONS_RETIRED,
	AMD_ZEN_BRANCHES_RETIRED,
	AMD_ZEN_BRANCHES_MISPREDICTED,
};
kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);

tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -11,72 +11,18 @@
  */
 #define _GNU_SOURCE /* for program_invocation_short_name */
-#include "test_util.h"
 #include "kvm_util.h"
+#include "pmu.h"
 #include "processor.h"
+#include "test_util.h"
-/*
- * In lieu of copying perf_event.h into tools...
- */
-#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
-#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
-/* End of stuff taken from perf_event.h. */
-/* Oddly, this isn't in perf_event.h. */
-#define ARCH_PERFMON_BRANCHES_RETIRED 5
 #define NUM_BRANCHES 42
-#define INTEL_PMC_IDX_FIXED 32
-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
 #define MAX_TEST_EVENTS 10
 #define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
 #define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
-#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
+#define PMU_EVENT_FILTER_INVALID_NEVENTS (KVM_PMU_EVENT_FILTER_MAX_EVENTS + 1)
-/*
- * This is how the event selector and unit mask are stored in an AMD
- * core performance event-select register. Intel's format is similar,
- * but the event selector is only 8 bits.
- */
-#define EVENT(select, umask) ((select & 0xf00UL) << 24 | (select & 0xff) | \
-			      (umask & 0xff) << 8)
-/*
- * "Branch instructions retired", from the Intel SDM, volume 3,
- * "Pre-defined Architectural Performance Events."
- */
-#define INTEL_BR_RETIRED EVENT(0xc4, 0)
-/*
- * "Retired branch instructions", from Processor Programming Reference
- * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
- * Preliminary Processor Programming Reference (PPR) for AMD Family
- * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
- * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
- * B1 Processors Volume 1 of 2.
- */
-#define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
-/*
- * "Retired instructions", from Processor Programming Reference
- * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
- * Preliminary Processor Programming Reference (PPR) for AMD Family
- * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
- * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
- * B1 Processors Volume 1 of 2.
- * --- and ---
- * "Instructions retired", from the Intel SDM, volume 3,
- * "Pre-defined Architectural Performance Events."
- */
-#define INST_RETIRED EVENT(0xc0, 0)
 struct __kvm_pmu_event_filter {
 	__u32 action;
@@ -84,26 +30,28 @@ struct __kvm_pmu_event_filter {
 	__u32 fixed_counter_bitmap;
 	__u32 flags;
 	__u32 pad[4];
-	__u64 events[MAX_FILTER_EVENTS];
+	__u64 events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
 };
 /*
- * This event list comprises Intel's eight architectural events plus
- * AMD's "retired branch instructions" for Zen[123] (and possibly
- * other AMD CPUs).
+ * This event list comprises Intel's known architectural events, plus AMD's
+ * "retired branch instructions" for Zen1-Zen3 (and possibly other AMD CPUs).
+ * Note, AMD and Intel use the same encoding for instructions retired.
  */
+kvm_static_assert(INTEL_ARCH_INSTRUCTIONS_RETIRED == AMD_ZEN_INSTRUCTIONS_RETIRED);
 static const struct __kvm_pmu_event_filter base_event_filter = {
 	.nevents = ARRAY_SIZE(base_event_filter.events),
 	.events = {
-		EVENT(0x3c, 0),
-		INST_RETIRED,
-		EVENT(0x3c, 1),
-		EVENT(0x2e, 0x4f),
-		EVENT(0x2e, 0x41),
-		EVENT(0xc4, 0),
-		EVENT(0xc5, 0),
-		EVENT(0xa4, 1),
-		AMD_ZEN_BR_RETIRED,
+		INTEL_ARCH_CPU_CYCLES,
+		INTEL_ARCH_INSTRUCTIONS_RETIRED,
+		INTEL_ARCH_REFERENCE_CYCLES,
+		INTEL_ARCH_LLC_REFERENCES,
+		INTEL_ARCH_LLC_MISSES,
+		INTEL_ARCH_BRANCHES_RETIRED,
+		INTEL_ARCH_BRANCHES_MISPREDICTED,
+		INTEL_ARCH_TOPDOWN_SLOTS,
+		AMD_ZEN_BRANCHES_RETIRED,
 	},
 };
@@ -165,9 +113,9 @@ static void intel_guest_code(void)
 	for (;;) {
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
-		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
+		      ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_BRANCHES_RETIRED);
 		wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
-		      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+		      ARCH_PERFMON_EVENTSEL_OS | INTEL_ARCH_INSTRUCTIONS_RETIRED);
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
 		run_and_measure_loop(MSR_IA32_PMC0);
@@ -189,9 +137,9 @@ static void amd_guest_code(void)
 	for (;;) {
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
-		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
+		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BRANCHES_RETIRED);
 		wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
-		      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_INSTRUCTIONS_RETIRED);
 		run_and_measure_loop(MSR_K7_PERFCTR0);
 		GUEST_SYNC(0);
@@ -312,7 +260,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 		.action = KVM_PMU_EVENT_DENY,
 		.nevents = 1,
 		.events = {
-			EVENT(0x1C2, 0),
+			RAW_EVENT(0x1C2, 0),
 		},
 	};
@@ -347,9 +295,9 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 	f.action = KVM_PMU_EVENT_DENY;
-	remove_event(&f, INST_RETIRED);
-	remove_event(&f, INTEL_BR_RETIRED);
-	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	remove_event(&f, INTEL_ARCH_INSTRUCTIONS_RETIRED);
+	remove_event(&f, INTEL_ARCH_BRANCHES_RETIRED);
+	remove_event(&f, AMD_ZEN_BRANCHES_RETIRED);
 	test_with_filter(vcpu, &f);
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
@@ -361,9 +309,9 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 	f.action = KVM_PMU_EVENT_ALLOW;
-	remove_event(&f, INST_RETIRED);
-	remove_event(&f, INTEL_BR_RETIRED);
-	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	remove_event(&f, INTEL_ARCH_INSTRUCTIONS_RETIRED);
+	remove_event(&f, INTEL_ARCH_BRANCHES_RETIRED);
+	remove_event(&f, AMD_ZEN_BRANCHES_RETIRED);
 	test_with_filter(vcpu, &f);
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
@@ -452,9 +400,9 @@ static bool use_amd_pmu(void)
  * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake.
  */
 #define MEM_INST_RETIRED 0xD0
-#define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81)
-#define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82)
-#define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83)
+#define MEM_INST_RETIRED_LOAD RAW_EVENT(MEM_INST_RETIRED, 0x81)
+#define MEM_INST_RETIRED_STORE RAW_EVENT(MEM_INST_RETIRED, 0x82)
+#define MEM_INST_RETIRED_LOAD_STORE RAW_EVENT(MEM_INST_RETIRED, 0x83)
 static bool supports_event_mem_inst_retired(void)
 {
@@ -486,9 +434,9 @@ static bool supports_event_mem_inst_retired(void)
  * B1 Processors Volume 1 of 2.
  */
 #define LS_DISPATCH 0x29
-#define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0))
-#define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1))
-#define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2))
+#define LS_DISPATCH_LOAD RAW_EVENT(LS_DISPATCH, BIT(0))
+#define LS_DISPATCH_STORE RAW_EVENT(LS_DISPATCH, BIT(1))
+#define LS_DISPATCH_LOAD_STORE RAW_EVENT(LS_DISPATCH, BIT(2))
 #define INCLUDE_MASKED_ENTRY(event_select, mask, match) \
 	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false)
@@ -729,14 +677,14 @@ static void add_dummy_events(uint64_t *events, int nevents)
 static void test_masked_events(struct kvm_vcpu *vcpu)
 {
-	int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS;
-	uint64_t events[MAX_FILTER_EVENTS];
+	int nevents = KVM_PMU_EVENT_FILTER_MAX_EVENTS - MAX_TEST_EVENTS;
+	uint64_t events[KVM_PMU_EVENT_FILTER_MAX_EVENTS];
 	/* Run the test cases against a sparse PMU event filter. */
 	run_masked_events_tests(vcpu, events, 0);
 	/* Run the test cases against a dense PMU event filter. */
-	add_dummy_events(events, MAX_FILTER_EVENTS);
+	add_dummy_events(events, KVM_PMU_EVENT_FILTER_MAX_EVENTS);
 	run_masked_events_tests(vcpu, events, nevents);
 }
@@ -809,20 +757,19 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
 }
-static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
+static void intel_run_fixed_counter_guest_code(uint8_t idx)
 {
 	for (;;) {
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
+		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + idx, 0);
 		/* Only OS_EN bit is enabled for fixed counter[idx]. */
-		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
-		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
-		      BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
+		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(idx, FIXED_PMC_KERNEL));
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(idx));
 		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
+		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + idx));
 	}
 }