Commit 76d883c4 authored by Shannon Zhao, committed by Marc Zyngier

arm64: KVM: Add access handler for PMOVSSET and PMOVSCLR register

Since the reset value of PMOVSSET and PMOVSCLR is UNKNOWN, use
reset_unknown as their reset handler. Add a handler to emulate writes
to the PMOVSSET and PMOVSCLR registers.

When a non-zero value is written to PMOVSSET and the corresponding
counter and its interrupt are enabled, kick the vcpu to synchronize
the PMU interrupt.
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 9db52c78
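For context on the semantics being emulated: PMOVSSET_EL0 and PMOVSCLR_EL0 are two views of the same overflow status bits. Writing 1 to a bit in the SET view marks that counter as overflowed, writing 1 to the same bit in the CLR view clears it, and reading either view returns the current status. A minimal, hypothetical guest-side sketch of those semantics (not part of this patch; assumes an AArch64 toolchain that accepts the named PMU system registers):

	/* Illustrative guest-side helpers, not from this patch. */
	static inline unsigned long read_overflow_status(void)
	{
		unsigned long val;

		/* Reads of PMOVSSET_EL0 and PMOVSCLR_EL0 return the same bits */
		asm volatile("mrs %0, pmovsclr_el0" : "=r" (val));
		return val;
	}

	static inline void ack_overflow(unsigned long mask)
	{
		/* Write-1-to-clear: only the bits set in 'mask' are cleared */
		asm volatile("msr pmovsclr_el0, %0" : : "r" (mask));
	}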
@@ -128,6 +128,7 @@ enum vcpu_sysreg {
 	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
 	PMCNTENSET_EL0,	/* Count Enable Set Register */
 	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
+	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */

 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
@@ -650,6 +650,28 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }

+static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	if (p->is_write) {
+		if (r->CRm & 0x2)
+			/* accessing PMOVSSET_EL0 */
+			kvm_pmu_overflow_set(vcpu, p->regval & mask);
+		else
+			/* accessing PMOVSCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
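Both registers are routed to this one handler. Their encodings, visible in the descriptor tables below, differ only in CRm (0b1100 for PMOVSCLR_EL0, 0b1110 for PMOVSSET_EL0), so testing bit 1 of CRm with r->CRm & 0x2 is enough to distinguish a set access from a clear.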
@@ -857,7 +879,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_pmcnten, NULL, PMCNTENSET_EL0 },
 	/* PMOVSCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
-	  trap_raz_wi },
+	  access_pmovs, NULL, PMOVSSET_EL0 },
 	/* PMSWINC_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
 	  trap_raz_wi },
@@ -884,7 +906,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  trap_raz_wi },
 	/* PMOVSSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
-	  trap_raz_wi },
+	  access_pmovs, reset_unknown, PMOVSSET_EL0 },
 	/* TPIDR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -1198,7 +1220,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
@@ -1208,6 +1230,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
 	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
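The same CRm test covers the AArch32 views wired up above: c9_c12 Op2(3) (PMOVSR, CRm = 12 = 0b1100) is the clear view and c9_c14 Op2(3) (PMOVSSET, CRm = 14 = 0b1110) the set view, so access_pmovs needs no 32-bit-specific code.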
@@ -43,6 +43,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
 void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx);
 #else
@@ -63,6 +64,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 }
 static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
 						  u64 data, u64 select_idx) {}
 #endif
@@ -149,6 +149,37 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
 	}
 }

+static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+{
+	u64 reg = 0;
+
+	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+		reg &= kvm_pmu_valid_counter_mask(vcpu);
+	}
+
+	return reg;
+}
+
+/**
+ * kvm_pmu_overflow_set - set PMU overflow interrupt
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMOVSSET register
+ */
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
+{
+	u64 reg;
+
+	if (val == 0)
+		return;
+
+	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
+	reg = kvm_pmu_overflow_status(vcpu);
+	if (reg != 0)
+		kvm_vcpu_kick(vcpu);
+}
+
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
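To make the gating above concrete, here is a self-contained model of what kvm_pmu_overflow_status() computes, with hypothetical register values (not part of the patch): an overflow bit only leads to a vcpu kick when PMCR_EL0.E is set and the same bit is set in PMOVSSET, PMCNTENSET and PMINTENSET, within the valid counter mask.

	#include <assert.h>
	#include <stdint.h>

	/* Standalone model of the AND-gating in kvm_pmu_overflow_status() */
	static uint64_t overflow_status(int pmcr_e, uint64_t ovsset,
					uint64_t cntenset, uint64_t intenset,
					uint64_t valid_mask)
	{
		uint64_t reg = 0;

		if (pmcr_e)
			reg = ovsset & cntenset & intenset & valid_mask;

		return reg;
	}

	int main(void)
	{
		/* Counters 0 and 2 overflowed, but only counter 0 is fully enabled */
		assert(overflow_status(1, 0x5, 0x1, 0x7, 0xf) == 0x1);
		/* With PMCR_EL0.E clear, no interrupt is pending at all */
		assert(overflow_status(0, 0x5, 0x1, 0x7, 0xf) == 0x0);
		return 0;
	}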