Commit 26d2d059 authored by Marc Zyngier

KVM: arm64: PMU: Do not let AArch32 change the counters' top 32 bits

Even when using PMUv3p5 (which implies 64bit counters), there is
no way for AArch32 to write to the top 32 bits of the counters.
The only way to influence these bits (other than by counting
events) is by writing PMCR.P==1.

Make sure we obey the architecture and preserve the top 32 bits
on a counter update.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-10-maz@kernel.org
parent 9917264d
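
The rule the patch enforces can be pictured with a small stand-alone sketch (plain user-space C, purely illustrative; emulated_counter, aarch32_write_counter and pmcr_p_reset are invented names, not KVM symbols): an AArch32 write may only replace the low 32 bits of a 64-bit counter, and only writing PMCR.P==1 clears the top half.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/*
 * Illustrative model of the behaviour described in the commit message,
 * not the KVM code itself. emulated_counter stands in for one of the
 * vCPU's 64-bit event counters.
 */
static uint64_t emulated_counter = 0x00000001deadbeefULL;

/* An AArch32 guest write can only replace the low 32 bits. */
static void aarch32_write_counter(uint32_t guest_val)
{
        emulated_counter = (emulated_counter & 0xffffffff00000000ULL) | guest_val;
}

/* Writing PMCR.P == 1 is the one way to clear the top half: it resets
 * the whole counter to 0. */
static void pmcr_p_reset(void)
{
        emulated_counter = 0;
}

int main(void)
{
        aarch32_write_counter(0x12345678);
        printf("after AArch32 write: %016" PRIx64 "\n", emulated_counter);
        /* -> 0000000112345678: the top 32 bits survived the write */

        pmcr_p_reset();
        printf("after PMCR.P=1:      %016" PRIx64 "\n", emulated_counter);
        /* -> 0000000000000000 */
        return 0;
}
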
@@ -119,13 +119,8 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	return counter;
 }
 
-/**
- * kvm_pmu_set_counter_value - set PMU counter value
- * @vcpu: The vcpu pointer
- * @select_idx: The counter index
- * @val: The counter value
- */
-void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+static void kvm_pmu_set_counter(struct kvm_vcpu *vcpu, u64 select_idx, u64 val,
+				bool force)
 {
 	u64 reg;
 
@@ -135,12 +130,36 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);
 
 	reg = counter_index_to_reg(select_idx);
+
+	if (vcpu_mode_is_32bit(vcpu) && select_idx != ARMV8_PMU_CYCLE_IDX &&
+	    !force) {
+		/*
+		 * Even with PMUv3p5, AArch32 cannot write to the top
+		 * 32bit of the counters. The only possible course of
+		 * action is to use PMCR.P, which will reset them to
+		 * 0 (the only use of the 'force' parameter).
+		 */
+		val = __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
+		val |= lower_32_bits(val);
+	}
+
 	__vcpu_sys_reg(vcpu, reg) = val;
 
 	/* Recreate the perf event to reflect the updated sample_period */
 	kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
+/**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+	kvm_pmu_set_counter(vcpu, select_idx, val, false);
+}
+
 /**
  * kvm_pmu_release_perf_event - remove the perf event
  * @pmc: The PMU counter pointer
@@ -533,7 +552,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
 		for_each_set_bit(i, &mask, 32)
-			kvm_pmu_set_counter_value(vcpu, i, 0);
+			kvm_pmu_set_counter(vcpu, i, 0, true);
 	}
 }
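
The last hunk's use of the forced path can likewise be sketched in isolation (again invented user-space names, not kernel code): PMCR.P zeroes every implemented event counter through the force==true path, while the cycle counter is masked out of the loop and left untouched.

#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 32
#define CYCLE_IDX   31          /* stand-in for ARMV8_PMU_CYCLE_IDX */

static uint64_t counters[NR_COUNTERS];

/* Model of the PMCR.P handling above: clear all implemented event
 * counters, skipping the cycle counter, which PMCR.P does not affect. */
static void handle_pmcr_p(uint32_t implemented_mask)
{
        implemented_mask &= ~(1u << CYCLE_IDX);
        for (int i = 0; i < NR_COUNTERS; i++)
                if (implemented_mask & (1u << i))
                        counters[i] = 0;        /* forced reset: top bits go too */
}

int main(void)
{
        counters[0] = 0x1ffffffffULL;
        counters[CYCLE_IDX] = 0x2ffffffffULL;

        handle_pmcr_p(0xffffffffu);
        printf("counter 0: %llx, cycle counter: %llx\n",
               (unsigned long long)counters[0],
               (unsigned long long)counters[CYCLE_IDX]);
        /* -> counter 0: 0, cycle counter: 2ffffffff */
        return 0;
}
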