Commit e79f49c3 authored by Like Xu, committed by Paolo Bonzini

KVM: x86/pmu: Introduce pmc->is_paused to reduce the call time of perf interfaces

Based on our observations, after any vm-exit associated with the vPMU,
two or more perf interfaces have to be called for guest counter
emulation, such as perf_event_{pause, read_value, period}(), and each
of them locks and unlocks the same perf_event_ctx. The calls become
even more frequent when the guest uses counters in a multiplexed
manner.
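
To make the cost concrete, here is a minimal sketch of that call
pattern (the helper name and exact call sequence are illustrative
only, not actual KVM code); each perf_event_*() call below takes and
drops the same perf_event_ctx lock internally:

  #include <linux/perf_event.h>

  /* Illustrative only: three back-to-back perf calls mean three
   * lock/unlock round trips on the same perf_event_ctx. */
  static void vpmu_sync_counter(struct kvm_pmc *pmc, u64 new_period)
  {
  	u64 enabled, running;

  	/* lock/unlock #1: stop the event, fold its count into the vPMC */
  	pmc->counter += perf_event_pause(pmc->perf_event, true);

  	/* lock/unlock #2: read the value again -- it is already 0 after
  	 * the reset above, which is exactly the redundancy this patch
  	 * targets */
  	pmc->counter += perf_event_read_value(pmc->perf_event,
  					      &enabled, &running);

  	/* lock/unlock #3: program the next sampling period */
  	perf_event_period(pmc->perf_event, new_period);
  }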

Taking the lock once and completing all of KVM's requested operations
inside the perf context would require a set of impractical new
interfaces. Instead, we can further optimize the vPMU implementation
by avoiding repeated calls to these interfaces in the KVM context for
at least one pattern:

After perf_event_pause() is called once, the event is disabled and its
internal count is reset to 0, so there is no need to pause it again or
read its value. And once the event is paused, its period will not be
updated until it is next resumed or reprogrammed, so there is likewise
no need to call perf_event_period() twice for a non-running counter
(the perf_event backing a running counter is never paused).
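
Concretely, the fix below is a single "is_paused" bool on the vPMC
that short-circuits those calls. As a sketch of the guard (the helper
name here is hypothetical; the real hunks open-code the test):

  /* Skip all perf calls while the backing event is known to be
   * paused, i.e. stopped with its count already folded into
   * pmc->counter and reset to 0. */
  static inline bool vpmc_perf_call_needed(struct kvm_pmc *pmc)
  {
  	return pmc->perf_event && !pmc->is_paused;
  }

pmc_pause_counter(), pmc_read_counter() and intel_pmu_set_msr() below
each test this condition (or its negation, for the early return)
instead of checking pmc->perf_event alone.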

Based on this implementation, for the following common usage of
sampling 4 events with perf on a 4u8g (4 vCPUs, 8 GB memory) guest:

  echo 0 > /proc/sys/kernel/watchdog
  echo 25 > /proc/sys/kernel/perf_cpu_time_max_percent
  echo 10000 > /proc/sys/kernel/perf_event_max_sample_rate
  echo 0 > /proc/sys/kernel/perf_cpu_time_max_percent
  for i in `seq 1 1 10`
  do
    taskset -c 0 perf record \
      -e cpu-cycles -e instructions -e branch-instructions -e cache-misses \
      /root/br_instr a
  done

the average latency of the guest NMI handler is reduced from
37646.7 ns to 32929.3 ns (a ~1.14x speedup) on an Intel Ice Lake (ICX)
server. In addition to collecting more samples, no loss of sampling
accuracy was observed compared to the unoptimized code.
Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20210728120705.6855-1-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
parent a75b5404
arch/x86/include/asm/kvm_host.h:

@@ -482,6 +482,7 @@ struct kvm_pmc {
 	 * ctrl value for fixed counters.
 	 */
 	u64 current_config;
+	bool is_paused;
 };
 
 struct kvm_pmu {
arch/x86/kvm/pmu.c:

@@ -137,18 +137,20 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 	pmc->perf_event = event;
 	pmc_to_pmu(pmc)->event_count++;
 	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+	pmc->is_paused = false;
 }
 
 static void pmc_pause_counter(struct kvm_pmc *pmc)
 {
 	u64 counter = pmc->counter;
 
-	if (!pmc->perf_event)
+	if (!pmc->perf_event || pmc->is_paused)
 		return;
 
 	/* update counter, reset event value to avoid redundant accumulation */
 	counter += perf_event_pause(pmc->perf_event, true);
 	pmc->counter = counter & pmc_bitmask(pmc);
+	pmc->is_paused = true;
 }
 
 static bool pmc_resume_counter(struct kvm_pmc *pmc)
@@ -163,6 +165,7 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 
 	/* reuse perf_event to serve as pmc_reprogram_counter() does*/
 	perf_event_enable(pmc->perf_event);
+	pmc->is_paused = false;
 
 	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
 	return true;
arch/x86/kvm/pmu.h:

@@ -55,7 +55,7 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
 	u64 counter, enabled, running;
 
 	counter = pmc->counter;
-	if (pmc->perf_event)
+	if (pmc->perf_event && !pmc->is_paused)
 		counter += perf_event_read_value(pmc->perf_event,
 						 &enabled, &running);
 	/* FIXME: Scaling needed? */
arch/x86/kvm/vmx/pmu_intel.c:

@@ -437,13 +437,13 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
 				data = (s64)(s32)data;
 			pmc->counter += data - pmc_read_counter(pmc);
-			if (pmc->perf_event)
+			if (pmc->perf_event && !pmc->is_paused)
 				perf_event_period(pmc->perf_event,
 						  get_sample_period(pmc, data));
 			return 0;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc->counter += data - pmc_read_counter(pmc);
-			if (pmc->perf_event)
+			if (pmc->perf_event && !pmc->is_paused)
 				perf_event_period(pmc->perf_event,
 						  get_sample_period(pmc, data));
 			return 0;