Commit fcf37b38 authored by Mark Brown, committed by Catalin Marinas

arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names

Normally we include the full register name in the defines for fields within
registers, but this convention has not been followed for ID registers. In
preparation for automatic generation of defines, add the _EL1s into the
defines for ID_AA64DFR0_EL1 to follow the convention. No functional changes.
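
As an illustration of the rename (values taken from the sysreg.h hunk in
the diff below; this snippet is explanatory, not part of the change), the
shift define for the PMUVer field goes from

    #define ID_AA64DFR0_PMUVer_SHIFT		8

to the convention-following

    #define ID_AA64DFR0_EL1_PMUVer_SHIFT	8

so that every field define carries the full ID_AA64DFR0_EL1 register name.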
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220910163354.860255-3-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent c0357a73
@@ -512,7 +512,7 @@ alternative_endif
  */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
-	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4
+	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
...
@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
	u64 mask = GENMASK_ULL(field + 3, field);
	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-	if (val == ID_AA64DFR0_PMUVer_IMP_DEF)
+	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val = 0;
	if (val > cap) {
...
@@ -40,7 +40,7 @@
.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
-	sbfx	x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4
+	sbfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #1
	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
@@ -49,7 +49,7 @@
	csel	x2, xzr, x0, lt			// all PMU counters from EL1
	/* Statistical profiling */
-	ubfx	x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present
	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
@@ -65,7 +65,7 @@
.Lskip_spe_\@:
	/* Trace buffer */
-	ubfx	x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present
	mrs_s	x0, SYS_TRBIDR_EL1
@@ -137,7 +137,7 @@
	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	x1, #3
	b.lt	.Lset_debug_fgt_\@
	/* Disable PMSNEVFR_EL1 read and write traps */
...
@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	return 1 +
		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_BRPs_SHIFT);
+						ID_AA64DFR0_EL1_BRPs_SHIFT);
}
/* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	return 1 +
		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_WRPs_SHIFT);
+						ID_AA64DFR0_EL1_WRPs_SHIFT);
}
#endif	/* __ASM_BREAKPOINT_H */
@@ -699,27 +699,27 @@
#endif
/* id_aa64dfr0 */
-#define ID_AA64DFR0_MTPMU_SHIFT		48
-#define ID_AA64DFR0_TraceBuffer_SHIFT	44
-#define ID_AA64DFR0_TraceFilt_SHIFT	40
-#define ID_AA64DFR0_DoubleLock_SHIFT	36
-#define ID_AA64DFR0_PMSVer_SHIFT	32
-#define ID_AA64DFR0_CTX_CMPs_SHIFT	28
-#define ID_AA64DFR0_WRPs_SHIFT		20
-#define ID_AA64DFR0_BRPs_SHIFT		12
-#define ID_AA64DFR0_PMUVer_SHIFT	8
-#define ID_AA64DFR0_TraceVer_SHIFT	4
-#define ID_AA64DFR0_DebugVer_SHIFT	0
-#define ID_AA64DFR0_PMUVer_8_0		0x1
-#define ID_AA64DFR0_PMUVer_8_1		0x4
-#define ID_AA64DFR0_PMUVer_8_4		0x5
-#define ID_AA64DFR0_PMUVer_8_5		0x6
-#define ID_AA64DFR0_PMUVer_8_7		0x7
-#define ID_AA64DFR0_PMUVer_IMP_DEF	0xf
-#define ID_AA64DFR0_PMSVer_8_2		0x1
-#define ID_AA64DFR0_PMSVer_8_3		0x2
+#define ID_AA64DFR0_EL1_MTPMU_SHIFT		48
+#define ID_AA64DFR0_EL1_TraceBuffer_SHIFT	44
+#define ID_AA64DFR0_EL1_TraceFilt_SHIFT		40
+#define ID_AA64DFR0_EL1_DoubleLock_SHIFT	36
+#define ID_AA64DFR0_EL1_PMSVer_SHIFT		32
+#define ID_AA64DFR0_EL1_CTX_CMPs_SHIFT		28
+#define ID_AA64DFR0_EL1_WRPs_SHIFT		20
+#define ID_AA64DFR0_EL1_BRPs_SHIFT		12
+#define ID_AA64DFR0_EL1_PMUVer_SHIFT		8
+#define ID_AA64DFR0_EL1_TraceVer_SHIFT		4
+#define ID_AA64DFR0_EL1_DebugVer_SHIFT		0
+#define ID_AA64DFR0_EL1_PMUVer_8_0		0x1
+#define ID_AA64DFR0_EL1_PMUVer_8_1		0x4
+#define ID_AA64DFR0_EL1_PMUVer_8_4		0x5
+#define ID_AA64DFR0_EL1_PMUVer_8_5		0x6
+#define ID_AA64DFR0_EL1_PMUVer_8_7		0x7
+#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF		0xf
+#define ID_AA64DFR0_EL1_PMSVer_8_2		0x1
+#define ID_AA64DFR0_EL1_PMSVer_8_3		0x2
#define ID_DFR0_PERFMON_SHIFT		24
...
@@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};
...
@@ -28,7 +28,7 @@
u8 debug_monitors_arch(void)
{
	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
-						ID_AA64DFR0_DebugVer_SHIFT);
+						ID_AA64DFR0_EL1_DebugVer_SHIFT);
}
/*
...
@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
-	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_5);
+	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5);
}
static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info)
	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
-			ID_AA64DFR0_PMUVer_SHIFT);
-	if (pmuver == ID_AA64DFR0_PMUVer_IMP_DEF || pmuver == 0)
+			ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
		return;
	cpu_pmu->pmuver = pmuver;
@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
			     pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	/* store PMMIR_EL1 register for sysfs */
-	if (pmuver >= ID_AA64DFR0_PMUVer_8_4 && (pmceid_raw[1] & BIT(31)))
+	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31)))
		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
	else
		cpu_pmu->reg_pmmir = 0;
...
@@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
	/* Check if we have TRBE implemented and available at the host */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}
...
@@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
	u64 cptr_set = 0;
	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}
	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;
	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}
	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;
	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
		cptr_set |= CPTR_EL2_TTA;
	vcpu->arch.mdcr_el2 |= mdcr_set;
...
@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
	pmuver = kvm->arch.arm_pmu->pmuver;
	switch (pmuver) {
-	case ID_AA64DFR0_PMUVer_8_0:
+	case ID_AA64DFR0_EL1_PMUVer_8_0:
		return GENMASK(9, 0);
-	case ID_AA64DFR0_PMUVer_8_1:
-	case ID_AA64DFR0_PMUVer_8_4:
-	case ID_AA64DFR0_PMUVer_8_5:
-	case ID_AA64DFR0_PMUVer_8_7:
+	case ID_AA64DFR0_EL1_PMUVer_8_1:
+	case ID_AA64DFR0_EL1_PMUVer_8_4:
+	case ID_AA64DFR0_EL1_PMUVer_8_5:
+	case ID_AA64DFR0_EL1_PMUVer_8_7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;
-	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF)
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return;
	mutex_lock(&arm_pmus_lock);
@@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
	if (event->pmu) {
		pmu = to_arm_pmu(event->pmu);
		if (pmu->pmuver == 0 ||
-		    pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF)
+		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
			pmu = NULL;
	}
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
		 * as RAZ
		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4)
			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
		base = 32;
	}
...
@@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_PMUVer_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVer_8_4 : 0);
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
+						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0);
		/* Hide SPE from guests */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
		break;
	case SYS_ID_DFR0_EL1:
		/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1827,9 +1827,9 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
-		p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) |
-			     (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) |
-			     (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20)
+		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
...