Commit b04b3315 authored by Marc Zyngier

Merge remote-tracking branch 'arm64/for-next/sysreg' into kvmarm-master/next

Merge arm64/for-next/sysreg in order to avoid upstream conflicts
due to the never-ending sysreg repainting...
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents c317c6d2 10453bf1
@@ -384,8 +384,8 @@ alternative_cb_end
 .macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
 	mrs	\tmp0, ID_AA64MMFR0_EL1
 	// Narrow PARange to fit the PS field in TCR_ELx
-	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
-	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
+	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
+	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
 	cmp	\tmp0, \tmp1
 	csel	\tmp0, \tmp1, \tmp0, hi
 	bfi	\tcr, \tmp0, \pos, #3
@@ -512,7 +512,7 @@ alternative_endif
  */
 .macro	reset_pmuserenr_el0, tmpreg
 	mrs	\tmpreg, id_aa64dfr0_el1
-	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
+	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
 	cmp	\tmpreg, #1			// Skip if no PMU present
 	b.lt	9000f
 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
@@ -524,7 +524,7 @@ alternative_endif
  */
 .macro	reset_amuserenr_el0, tmpreg
 	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
-	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
+	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
 	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
 	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
 .Lskip_\@:
@@ -612,7 +612,7 @@ alternative_endif
 .macro	offset_ttbr1, ttbr, tmp
 #ifdef CONFIG_ARM64_VA_BITS_52
 	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
-	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
 	cbnz	\tmp, .Lskipoffs_\@
 	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
 .Lskipoffs_\@ :
...
@@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void)
 #define arch_slab_minalign() arch_slab_minalign()
 #endif
 
-#define CTR_CACHE_MINLINE_MASK	\
-	(0xf << CTR_EL0_DMINLINE_SHIFT | \
-	 CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
-
 #define CTR_L1IP(ctr)		SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
 
 #define ICACHEF_ALIASING	0
...
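For context, SYS_FIELD_GET() is the generated-header replacement for hand-rolled shift/mask pairs such as the deleted CTR_CACHE_MINLINE_MASK; it behaves like FIELD_GET() over a generated <reg>_<field>_MASK constant. A minimal user-space sketch of the same extraction, assuming (not taken from this diff) that CTR_EL0.L1Ip occupies bits [15:14]:

#include <stdint.h>

/* Assumed layout: CTR_EL0.L1Ip at bits [15:14]. */
#define CTR_EL0_L1Ip_SHIFT	14
#define CTR_EL0_L1Ip_MASK	(0x3ULL << CTR_EL0_L1Ip_SHIFT)

/* Behaves like the kernel's FIELD_GET(): mask first, then shift down. */
static inline uint64_t field_get(uint64_t mask, unsigned int shift, uint64_t val)
{
	return (val & mask) >> shift;
}

static inline unsigned int ctr_l1ip(uint64_t ctr)
{
	return field_get(CTR_EL0_L1Ip_MASK, CTR_EL0_L1Ip_SHIFT, ctr);
}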
@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
 	u64 mask = GENMASK_ULL(field + 3, field);
 
 	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-	if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
+	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 		val = 0;
 
 	if (val > cap) {
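The helper touched here caps a 4-bit ID register field at a given maximum, with the quirk that the IMPLEMENTATION DEFINED encoding (0xf) is treated as "not implemented". A standalone sketch of that logic, mirroring the kernel function rather than quoting it:

#include <stdint.h>

#define PMUVER_IMP_DEF	0xfULL	/* IMPLEMENTATION DEFINED encoding */

/* Sketch: clamp a 4-bit ID field at "cap", treating IMP DEF as absent. */
static uint64_t cap_perfmon_field(uint64_t features, int field, uint64_t cap)
{
	uint64_t mask = 0xfULL << field;
	uint64_t val = (features >> field) & 0xf;

	if (val == PMUVER_IMP_DEF)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= cap << field;
	}
	return features;
}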
@@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 {
-	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
-		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
+	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
+		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
 }
 
 static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
 
-	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
 
-	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
+	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_sve(u64 pfr0)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
 
 	return val > 0;
 }
 
 static inline bool id_aa64pfr1_sme(u64 pfr1)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
 
 	return val > 0;
 }
 
 static inline bool id_aa64pfr1_mte(u64 pfr1)
 {
-	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
 
-	return val >= ID_AA64PFR1_MTE;
+	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
 void __init setup_cpu_features(void);
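All of these predicates reduce to reading a 4-bit field out of a sanitised 64-bit ID register. A sketch of what cpuid_feature_extract_unsigned_field() does, assuming the SVE field's architected position at bits [35:32]:

#include <stdint.h>

/* Sketch: ID register fields are 4 bits wide, so unsigned extraction
 * is just a shift plus a mask. */
static unsigned int extract_unsigned_field(uint64_t reg, int shift)
{
	return (reg >> shift) & 0xf;
}

/* Assumed for the example: ID_AA64PFR0_EL1.SVE lives at bits [35:32]. */
#define ID_AA64PFR0_EL1_SVE_SHIFT	32

static int has_sve(uint64_t pfr0)
{
	return extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT) > 0;
}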
@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
 		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
 	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
-							ID_AA64PFR0_CSV2_SHIFT);
+							ID_AA64PFR0_EL1_CSV2_SHIFT);
 	return csv2_val == 3;
 }
@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
-						ID_AA64MMFR0_TGRAN4_SHIFT);
+						ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
 
-	return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
-	       (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
+	return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_64kb_granule(void)
@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
-						ID_AA64MMFR0_TGRAN64_SHIFT);
+						ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
 
-	return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
-	       (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
+	return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_16kb_granule(void)
@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
-						ID_AA64MMFR0_TGRAN16_SHIFT);
+						ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
 
-	return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
-	       (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
+	return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
-						ID_AA64MMFR0_BIGENDEL_SHIFT);
+						ID_AA64MMFR0_EL1_BIGEND_SHIFT);
 
 	return val == 0x1;
 }
@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 {
 	switch (parange) {
-	case ID_AA64MMFR0_PARANGE_32: return 32;
-	case ID_AA64MMFR0_PARANGE_36: return 36;
-	case ID_AA64MMFR0_PARANGE_40: return 40;
-	case ID_AA64MMFR0_PARANGE_42: return 42;
-	case ID_AA64MMFR0_PARANGE_44: return 44;
-	case ID_AA64MMFR0_PARANGE_48: return 48;
-	case ID_AA64MMFR0_PARANGE_52: return 52;
+	case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
+	case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
+	case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
+	case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
+	case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
+	case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
+	case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
 	/*
 	 * A future PE could use a value unknown to the kernel.
 	 * However, by the "D10.1.4 Principles of the ID scheme
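The switch above maps the architected PARange encodings 0x0 through 0x6 onto physical address widths. The same mapping as a table-driven sketch (the kernel's handling of unknown future encodings, which the truncated comment describes, is simplified here):

#include <stdint.h>

/* Sketch of the PARange encoding -> PA width mapping (encodings 0x0-0x6). */
static uint32_t parange_to_phys_shift(int parange)
{
	static const uint32_t shifts[] = { 32, 36, 40, 42, 44, 48, 52 };

	if (parange >= 0 && parange < (int)(sizeof(shifts) / sizeof(shifts[0])))
		return shifts[parange];
	/* Assumption for this sketch: cap unknown (future) encodings at the
	 * largest width we know; the kernel caps at its configured PA size. */
	return 52;
}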
@@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void)
 	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(mmfr1,
-						ID_AA64MMFR1_HADBS_SHIFT);
+						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
 }
 
 static inline bool cpu_has_pan(void)
 {
 	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(mmfr1,
-						ID_AA64MMFR1_PAN_SHIFT);
+						ID_AA64MMFR1_EL1_PAN_SHIFT);
 }
 
 #ifdef CONFIG_ARM64_AMU_EXTN
@@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 	int vmid_bits;
 
 	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
-						ID_AA64MMFR1_VMIDBITS_SHIFT);
-	if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
+						ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
+	if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
 		return 16;
 
 	/*
...
@@ -40,7 +40,7 @@
 .macro __init_el2_debug
 	mrs	x1, id_aa64dfr0_el1
-	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
+	sbfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
 	cmp	x0, #1
 	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
@@ -49,7 +49,7 @@
 	csel	x2, xzr, x0, lt			// all PMU counters from EL1
 
 	/* Statistical profiling */
-	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present
 
 	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
@@ -65,7 +65,7 @@
 .Lskip_spe_\@:
 
 	/* Trace buffer */
-	ubfx	x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
 	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present
 
 	mrs_s	x0, SYS_TRBIDR_EL1
@@ -83,7 +83,7 @@
 /* LORegions */
 .macro __init_el2_lor
 	mrs	x1, id_aa64mmfr1_el1
-	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
+	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
 	cbz	x0, .Lskip_lor_\@
 	msr_s	SYS_LORC_EL1, xzr
 .Lskip_lor_\@:
@@ -97,7 +97,7 @@
 /* GICv3 system register access */
 .macro __init_el2_gicv3
 	mrs	x0, id_aa64pfr0_el1
-	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
+	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
 	cbz	x0, .Lskip_gicv3_\@
 
 	mrs_s	x0, SYS_ICC_SRE_EL2
@@ -132,12 +132,12 @@
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
 	mrs	x1, id_aa64mmfr0_el1
-	ubfx	x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
 	cbz	x1, .Lskip_fgt_\@
 
 	mov	x0, xzr
 	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 	cmp	x1, #3
 	b.lt	.Lset_debug_fgt_\@
 
 	/* Disable PMSNEVFR_EL1 read and write traps */
@@ -149,7 +149,7 @@
 
 	mov	x0, xzr
 	mrs	x1, id_aa64pfr1_el1
-	ubfx	x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
 	cbz	x1, .Lset_fgt_\@
 
 	/* Disable nVHE traps of TPIDR2 and SMPRI */
@@ -162,7 +162,7 @@
 	msr_s	SYS_HFGITR_EL2, xzr
 
 	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
-	ubfx	x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
 	cbz	x1, .Lskip_fgt_\@
 
 	msr_s	SYS_HAFGRTR_EL2, xzr
...
@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
 		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_BRPS_SHIFT);
+						ID_AA64DFR0_EL1_BRPs_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
 		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_WRPS_SHIFT);
+						ID_AA64DFR0_EL1_WRPs_SHIFT);
 }
 
 #endif	/* __ASM_BREAKPOINT_H */
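The "1 +" is there because the BRPs and WRPs fields encode "number of breakpoints/watchpoints minus one". A sketch, assuming the architected field positions BRPs [15:12] and WRPs [23:20] (which match the SHIFT values deleted later in this diff):

#include <stdint.h>

/* Assumed field positions in ID_AA64DFR0_EL1. */
#define BRPs_SHIFT	12
#define WRPs_SHIFT	20

/* Fields encode "count minus one", hence the "1 +". */
static int num_breakpoints(uint64_t dfr0)
{
	return 1 + ((dfr0 >> BRPs_SHIFT) & 0xf);
}

static int num_watchpoints(uint64_t dfr0)
{
	return 1 + ((dfr0 >> WRPs_SHIFT) & 0xf);
}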
@@ -16,9 +16,9 @@
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
 	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
-				ID_AA64MMFR0_PARANGE_SHIFT);
-	if (parange > ID_AA64MMFR0_PARANGE_MAX)
-		parange = ID_AA64MMFR0_PARANGE_MAX;
+				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+	if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
+		parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
 
 	return parange;
 }
...
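kvm_get_parange() clamps the CPU's reported PARange at the largest encoding the kernel build can actually use (0x6 = 52 bits with CONFIG_ARM64_PA_BITS_52, otherwise 0x5 = 48 bits, per the defines later in this diff). A sketch assuming a 48-bit build:

#include <stdint.h>

#define PARANGE_MAX	0x5	/* assumed: non-PA_BITS_52 build caps at 48 bits */

static uint64_t get_parange(uint64_t mmfr0)
{
	uint64_t parange = mmfr0 & 0xf;	/* PARange lives at bits [3:0] */

	return parange > PARANGE_MAX ? PARANGE_MAX : parange;
}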
@@ -190,19 +190,6 @@
 #define SYS_MVFR1_EL1			sys_reg(3, 0, 0, 3, 1)
 #define SYS_MVFR2_EL1			sys_reg(3, 0, 0, 3, 2)
 
-#define SYS_ID_AA64PFR0_EL1		sys_reg(3, 0, 0, 4, 0)
-#define SYS_ID_AA64PFR1_EL1		sys_reg(3, 0, 0, 4, 1)
-
-#define SYS_ID_AA64DFR0_EL1		sys_reg(3, 0, 0, 5, 0)
-#define SYS_ID_AA64DFR1_EL1		sys_reg(3, 0, 0, 5, 1)
-
-#define SYS_ID_AA64AFR0_EL1		sys_reg(3, 0, 0, 5, 4)
-#define SYS_ID_AA64AFR1_EL1		sys_reg(3, 0, 0, 5, 5)
-
-#define SYS_ID_AA64MMFR0_EL1		sys_reg(3, 0, 0, 7, 0)
-#define SYS_ID_AA64MMFR1_EL1		sys_reg(3, 0, 0, 7, 1)
-#define SYS_ID_AA64MMFR2_EL1		sys_reg(3, 0, 0, 7, 2)
-
 #define SYS_ACTLR_EL1			sys_reg(3, 0, 1, 0, 1)
 #define SYS_RGSR_EL1			sys_reg(3, 0, 1, 0, 5)
 #define SYS_GCR_EL1			sys_reg(3, 0, 1, 0, 6)
@@ -436,19 +423,11 @@
 #define SYS_ICC_IGRPEN0_EL1		sys_reg(3, 0, 12, 12, 6)
 #define SYS_ICC_IGRPEN1_EL1		sys_reg(3, 0, 12, 12, 7)
 
-#define SYS_TPIDR_EL1			sys_reg(3, 0, 13, 0, 4)
-
-#define SYS_SCXTNUM_EL1			sys_reg(3, 0, 13, 0, 7)
-
 #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)
 
 #define SYS_CCSIDR_EL1			sys_reg(3, 1, 0, 0, 0)
 #define SYS_AIDR_EL1			sys_reg(3, 1, 0, 0, 7)
 
-#define SMIDR_EL1_IMPLEMENTER_SHIFT	24
-#define SMIDR_EL1_SMPS_SHIFT		15
-#define SMIDR_EL1_AFFINITY_SHIFT	0
-
 #define SYS_RNDR_EL0			sys_reg(3, 3, 2, 4, 0)
 #define SYS_RNDRRS_EL0			sys_reg(3, 3, 2, 4, 1)
@@ -537,7 +516,6 @@
 #define SYS_HFGWTR_EL2			sys_reg(3, 4, 1, 1, 5)
 #define SYS_HFGITR_EL2			sys_reg(3, 4, 1, 1, 6)
 #define SYS_TRFCR_EL2			sys_reg(3, 4, 1, 2, 1)
-#define SYS_HCRX_EL2			sys_reg(3, 4, 1, 2, 2)
 #define SYS_HDFGRTR_EL2			sys_reg(3, 4, 3, 1, 4)
 #define SYS_HDFGWTR_EL2			sys_reg(3, 4, 3, 1, 5)
 #define SYS_HAFGRTR_EL2			sys_reg(3, 4, 3, 1, 6)
@@ -690,164 +668,30 @@
 #define MAIR_ATTRIDX(attr, idx)		((attr) << ((idx) * 8))
 
 /* id_aa64pfr0 */
-#define ID_AA64PFR0_CSV3_SHIFT		60
-#define ID_AA64PFR0_CSV2_SHIFT		56
-#define ID_AA64PFR0_DIT_SHIFT		48
-#define ID_AA64PFR0_AMU_SHIFT		44
-#define ID_AA64PFR0_MPAM_SHIFT		40
-#define ID_AA64PFR0_SEL2_SHIFT		36
-#define ID_AA64PFR0_SVE_SHIFT		32
-#define ID_AA64PFR0_RAS_SHIFT		28
-#define ID_AA64PFR0_GIC_SHIFT		24
-#define ID_AA64PFR0_ASIMD_SHIFT		20
-#define ID_AA64PFR0_FP_SHIFT		16
-#define ID_AA64PFR0_EL3_SHIFT		12
-#define ID_AA64PFR0_EL2_SHIFT		8
-#define ID_AA64PFR0_EL1_SHIFT		4
-#define ID_AA64PFR0_EL0_SHIFT		0
-
-#define ID_AA64PFR0_AMU			0x1
-#define ID_AA64PFR0_SVE			0x1
-#define ID_AA64PFR0_RAS_V1		0x1
-#define ID_AA64PFR0_RAS_V1P1		0x2
-#define ID_AA64PFR0_FP_NI		0xf
-#define ID_AA64PFR0_FP_SUPPORTED	0x0
-#define ID_AA64PFR0_ASIMD_NI		0xf
-#define ID_AA64PFR0_ASIMD_SUPPORTED	0x0
-#define ID_AA64PFR0_ELx_64BIT_ONLY	0x1
-#define ID_AA64PFR0_ELx_32BIT_64BIT	0x2
+#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY	0x1
+#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT	0x2
 
-/* id_aa64pfr1 */
-#define ID_AA64PFR1_SME_SHIFT		24
-#define ID_AA64PFR1_MPAMFRAC_SHIFT	16
-#define ID_AA64PFR1_RASFRAC_SHIFT	12
-#define ID_AA64PFR1_MTE_SHIFT		8
-#define ID_AA64PFR1_SSBS_SHIFT		4
-#define ID_AA64PFR1_BT_SHIFT		0
-
-#define ID_AA64PFR1_SSBS_PSTATE_NI	0
-#define ID_AA64PFR1_SSBS_PSTATE_ONLY	1
-#define ID_AA64PFR1_SSBS_PSTATE_INSNS	2
-#define ID_AA64PFR1_BT_BTI		0x1
-#define ID_AA64PFR1_SME			1
-
-#define ID_AA64PFR1_MTE_NI		0x0
-#define ID_AA64PFR1_MTE_EL0		0x1
-#define ID_AA64PFR1_MTE			0x2
-#define ID_AA64PFR1_MTE_ASYMM		0x3
-
 /* id_aa64mmfr0 */
-#define ID_AA64MMFR0_ECV_SHIFT		60
-#define ID_AA64MMFR0_FGT_SHIFT		56
-#define ID_AA64MMFR0_EXS_SHIFT		44
-#define ID_AA64MMFR0_TGRAN4_2_SHIFT	40
-#define ID_AA64MMFR0_TGRAN64_2_SHIFT	36
-#define ID_AA64MMFR0_TGRAN16_2_SHIFT	32
-#define ID_AA64MMFR0_TGRAN4_SHIFT	28
-#define ID_AA64MMFR0_TGRAN64_SHIFT	24
-#define ID_AA64MMFR0_TGRAN16_SHIFT	20
-#define ID_AA64MMFR0_BIGENDEL0_SHIFT	16
-#define ID_AA64MMFR0_SNSMEM_SHIFT	12
-#define ID_AA64MMFR0_BIGENDEL_SHIFT	8
-#define ID_AA64MMFR0_ASID_SHIFT		4
-#define ID_AA64MMFR0_PARANGE_SHIFT	0
-
-#define ID_AA64MMFR0_ASID_8		0x0
-#define ID_AA64MMFR0_ASID_16		0x2
-
-#define ID_AA64MMFR0_TGRAN4_NI			0xf
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN	0x0
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX	0x7
-#define ID_AA64MMFR0_TGRAN64_NI			0xf
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN	0x0
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX	0x7
-#define ID_AA64MMFR0_TGRAN16_NI			0x0
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN	0x1
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX	0xf
-
-#define ID_AA64MMFR0_PARANGE_32		0x0
-#define ID_AA64MMFR0_PARANGE_36		0x1
-#define ID_AA64MMFR0_PARANGE_40		0x2
-#define ID_AA64MMFR0_PARANGE_42		0x3
-#define ID_AA64MMFR0_PARANGE_44		0x4
-#define ID_AA64MMFR0_PARANGE_48		0x5
-#define ID_AA64MMFR0_PARANGE_52		0x6
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN	0x0
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN	0x0
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN	0x1
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX	0xf
 
 #define ARM64_MIN_PARANGE_BITS		32
 
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT	0x0
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE	0x1
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN	0x2
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT	0x0
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE		0x1
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN		0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX		0x7
 
 #ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_52
+#define ID_AA64MMFR0_EL1_PARANGE_MAX	ID_AA64MMFR0_EL1_PARANGE_52
 #else
-#define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_48
+#define ID_AA64MMFR0_EL1_PARANGE_MAX	ID_AA64MMFR0_EL1_PARANGE_48
 #endif
 
-/* id_aa64mmfr1 */
-#define ID_AA64MMFR1_ECBHB_SHIFT	60
-#define ID_AA64MMFR1_TIDCP1_SHIFT	52
-#define ID_AA64MMFR1_HCX_SHIFT		40
-#define ID_AA64MMFR1_AFP_SHIFT		44
-#define ID_AA64MMFR1_ETS_SHIFT		36
-#define ID_AA64MMFR1_TWED_SHIFT		32
-#define ID_AA64MMFR1_XNX_SHIFT		28
-#define ID_AA64MMFR1_SPECSEI_SHIFT	24
-#define ID_AA64MMFR1_PAN_SHIFT		20
-#define ID_AA64MMFR1_LOR_SHIFT		16
-#define ID_AA64MMFR1_HPD_SHIFT		12
-#define ID_AA64MMFR1_VHE_SHIFT		8
-#define ID_AA64MMFR1_VMIDBITS_SHIFT	4
-#define ID_AA64MMFR1_HADBS_SHIFT	0
-
-#define ID_AA64MMFR1_VMIDBITS_8		0
-#define ID_AA64MMFR1_VMIDBITS_16	2
-
-#define ID_AA64MMFR1_TIDCP1_NI		0
-#define ID_AA64MMFR1_TIDCP1_IMP		1
-
-/* id_aa64mmfr2 */
-#define ID_AA64MMFR2_E0PD_SHIFT		60
-#define ID_AA64MMFR2_EVT_SHIFT		56
-#define ID_AA64MMFR2_BBM_SHIFT		52
-#define ID_AA64MMFR2_TTL_SHIFT		48
-#define ID_AA64MMFR2_FWB_SHIFT		40
-#define ID_AA64MMFR2_IDS_SHIFT		36
-#define ID_AA64MMFR2_AT_SHIFT		32
-#define ID_AA64MMFR2_ST_SHIFT		28
-#define ID_AA64MMFR2_NV_SHIFT		24
-#define ID_AA64MMFR2_CCIDX_SHIFT	20
-#define ID_AA64MMFR2_LVA_SHIFT		16
-#define ID_AA64MMFR2_IESB_SHIFT		12
-#define ID_AA64MMFR2_LSM_SHIFT		8
-#define ID_AA64MMFR2_UAO_SHIFT		4
-#define ID_AA64MMFR2_CNP_SHIFT		0
-
-/* id_aa64dfr0 */
-#define ID_AA64DFR0_MTPMU_SHIFT		48
-#define ID_AA64DFR0_TRBE_SHIFT		44
-#define ID_AA64DFR0_TRACE_FILT_SHIFT	40
-#define ID_AA64DFR0_DOUBLELOCK_SHIFT	36
-#define ID_AA64DFR0_PMSVER_SHIFT	32
-#define ID_AA64DFR0_CTX_CMPS_SHIFT	28
-#define ID_AA64DFR0_WRPS_SHIFT		20
-#define ID_AA64DFR0_BRPS_SHIFT		12
-#define ID_AA64DFR0_PMUVER_SHIFT	8
-#define ID_AA64DFR0_TRACEVER_SHIFT	4
-#define ID_AA64DFR0_DEBUGVER_SHIFT	0
-
-#define ID_AA64DFR0_PMUVER_8_0		0x1
-#define ID_AA64DFR0_PMUVER_8_1		0x4
-#define ID_AA64DFR0_PMUVER_8_4		0x5
-#define ID_AA64DFR0_PMUVER_8_5		0x6
-#define ID_AA64DFR0_PMUVER_8_7		0x7
-#define ID_AA64DFR0_PMUVER_IMP_DEF	0xf
-
-#define ID_AA64DFR0_PMSVER_8_2		0x1
-#define ID_AA64DFR0_PMSVER_8_3		0x2
-
 #define ID_DFR0_PERFMON_SHIFT		24
 
 #define ID_DFR0_PERFMON_8_0		0x3
@@ -955,20 +799,20 @@
 #define ID_PFR1_PROGMOD_SHIFT		0
 
 #if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT		ID_AA64MMFR0_TGRAN4_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN4_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT		ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
 #elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT		ID_AA64MMFR0_TGRAN16_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN16_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT		ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
 #elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT		ID_AA64MMFR0_TGRAN64_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN64_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT		ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
 #endif
 
 #define MVFR2_FPMISC_SHIFT		4
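These aliases let page-size-independent code test the TGRAN field matching the configured granule against its supported MIN/MAX range. A sketch of that range check, using the field positions from the deleted defines above (TGRAN4 at 28, TGRAN64 at 24, TGRAN16 at 20):

#include <stdint.h>

enum granule { G4K, G16K, G64K };

/* Sketch of the check behind system_supports_*_granule(). */
static int granule_supported(uint64_t mmfr0, enum granule g)
{
	unsigned int shift, min, max;

	switch (g) {
	case G4K:  shift = 28; min = 0x0; max = 0x7; break;
	case G64K: shift = 24; min = 0x0; max = 0x7; break;
	case G16K: shift = 20; min = 0x1; max = 0xf; break;	/* 0x0 means "not implemented" */
	default: return 0;
	}

	unsigned int val = (mmfr0 >> shift) & 0xf;
	return val >= min && val <= max;
}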
@@ -1028,9 +872,6 @@
 #define TRFCR_ELx_ExTRE			BIT(1)
 #define TRFCR_ELx_E0TRE			BIT(0)
 
-/* HCRX_EL2 definitions */
-#define HCRX_EL2_SMPME_MASK		(1 << 5)
-
 /* GIC Hypervisor interface registers */
 /* ICH_MISR_EL2 bit definitions */
 #define ICH_MISR_EOI			(1 << 0)
...
...
@@ -28,7 +28,7 @@
 u8 debug_monitors_arch(void)
 {
 	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
-						ID_AA64DFR0_DEBUGVER_SHIFT);
+						ID_AA64DFR0_EL1_DebugVer_SHIFT);
 }
 
 /*
...
@@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry)
 	 */
 #if VA_BITS > 48
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	tst	x0, #0xf << ID_AA64MMFR2_LVA_SHIFT
+	tst	x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
 	mov	x0, #VA_BITS
 	mov	x25, #VA_BITS_MIN
 	csel	x25, x25, x0, eq
@@ -656,10 +656,10 @@ SYM_FUNC_END(__secondary_too_slow)
  */
 SYM_FUNC_START(__enable_mmu)
 	mrs	x3, ID_AA64MMFR0_EL1
-	ubfx	x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
+	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
 	b.lt	__no_granule_support
-	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
 	b.gt	__no_granule_support
 	phys_to_ttbr x2, x2
 	msr	ttbr0_el1, x2			// load TTBR0
@@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
 	b.ne	2f
 
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+	and	x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
 	cbnz	x0, 2f
 
 	update_early_cpu_boot_status \
...
@@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync)
 SYM_CODE_END(elx_sync)
 
 SYM_CODE_START_LOCAL(__finalise_el2)
-	check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve
+	check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
 
 .Linit_sve:	/* SVE register access */
 	mrs	x0, cptr_el2			// Disable SVE traps
@@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
 
 .Lskip_sve:
-	check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme
+	check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
 
 .Linit_sme:	/* SME register access and priority mapping */
 	mrs	x0, cptr_el2			// Disable SME traps
@@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
 
 	mrs	x1, id_aa64mmfr1_el1		// HCRX_EL2 present?
-	ubfx	x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
 	cbz	x1, .Lskip_sme
 
 	mrs_s	x1, SYS_HCRX_EL2
@@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 	tbnz	x1, #0, 1f
 
 	// Needs to be VHE capable, obviously
-	check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f
+	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f
 
 1:	mov_q	x0, HVC_STUB_ERR
 	eret
...
@@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = {
 	.name		= "id_aa64mmfr1",
 	.override	= &id_aa64mmfr1_override,
 	.fields		= {
-		FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter),
+		FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter),
 		{}
 	},
 };
@@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = {
 	.name		= "id_aa64pfr0",
 	.override	= &id_aa64pfr0_override,
 	.fields		= {
-		FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter),
+		FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
 		{}
 	},
 };
@@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = {
 	.name		= "id_aa64pfr1",
 	.override	= &id_aa64pfr1_override,
 	.fields		= {
-		FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ),
-		FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL),
-		FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter),
+		FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ),
+		FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
+		FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
 		{}
 	},
 };
...
@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
  */
 static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
 {
-	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
+	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
 }
 
 static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info)
 	dfr0 = read_sysreg(id_aa64dfr0_el1);
 	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
-			ID_AA64DFR0_PMUVER_SHIFT);
-	if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
+			ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
 		return;
 
 	cpu_pmu->pmuver = pmuver;
@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
 			     pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 
 	/* store PMMIR_EL1 register for sysfs */
-	if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
+	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
 		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
 	else
 		cpu_pmu->reg_pmmir = 0;
...
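The PMUVer rename only changes spelling, from architecture-revision names to PMUv3-revision names; the encodings are unchanged, as the deleted defines earlier in this diff show. A sketch of the correspondence and of the long-event test above:

#include <stdint.h>

/* Encodings taken from the deleted defines in this diff, e.g.
 * ID_AA64DFR0_PMUVER_8_4 (0x5) becomes ID_AA64DFR0_EL1_PMUVer_V3P4. */
enum pmuver {
	PMUVer_IMP	= 0x1,	/* PMUv3 for ARMv8.0 (was PMUVER_8_0) */
	PMUVer_V3P1	= 0x4,	/* was PMUVER_8_1 */
	PMUVer_V3P4	= 0x5,	/* was PMUVER_8_4 */
	PMUVer_V3P5	= 0x6,	/* was PMUVER_8_5 */
	PMUVer_V3P7	= 0x7,	/* was PMUVER_8_7 */
	PMUVer_IMP_DEF	= 0xf,	/* IMPLEMENTATION DEFINED */
};

/* Long events need PMUv3 for ARMv8.5+; the probe path above has already
 * filtered out IMP_DEF, this sketch guards explicitly instead. */
static int has_long_event(int pmuver)
{
	return pmuver != PMUVer_IMP_DEF && pmuver >= PMUVer_V3P5;
}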
@@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 	/* If the CPU has CSV2 set, we're safe */
 	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
 		return SPECTRE_UNAFFECTED;
 
 	/* Alternatively, we have a list of unaffected CPUs */
@@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope)
 		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 
 	return cpuid_feature_extract_unsigned_field(mmfr1,
-						ID_AA64MMFR1_ECBHB_SHIFT);
+						ID_AA64MMFR1_EL1_ECBHB_SHIFT);
 }
 
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
...
@@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
 	 * If SPE is present on this CPU and is available at current EL,
 	 * we may need to check if the host state needs to be saved.
 	 */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
 	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
 		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
 
 	/* Check if we have TRBE implemented and available at the host */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
 	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
 		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
 }
...
@@ -35,9 +35,9 @@
  * - Data Independent Timing
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
 	)
 
 /*
@@ -49,11 +49,11 @@
  * Supported by KVM
  */
 #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
 	)
 
 /*
@@ -62,8 +62,8 @@
  * - Speculative Store Bypassing
  */
 #define PVM_ID_AA64PFR1_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
+	ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
 	)
 
 /*
@@ -74,10 +74,10 @@
  * - Non-context synchronizing exception entry and exit
  */
 #define PVM_ID_AA64MMFR0_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
 	)
 
 /*
@@ -86,8 +86,8 @@
  * - 16-bit ASID
  */
 #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
-	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
+	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
 	)
 
 /*
@@ -100,12 +100,12 @@
  * - Enhanced Translation Synchronization
  */
 #define PVM_ID_AA64MMFR1_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \
 	)
 
 /*
@@ -120,14 +120,14 @@
 * - E0PDx mechanism
 */
 #define PVM_ID_AA64MMFR2_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \
-	ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \
+	ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
 	)
 
 /*
...
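ARM64_FEATURE_MASK() builds a 4-bit-wide mask at the field's shift, and FIELD_PREP() places a value into that mask; the pKVM allow/restrict masks are just ORs of such terms. A user-space sketch under those assumptions, using the PFR0 EL0 (shift 0) and RAS (shift 28) fields from the deleted defines as examples:

#include <stdint.h>

/* Stand-ins for GENMASK_ULL() and FIELD_PREP(); __builtin_ctzll is a
 * GCC/Clang builtin used to find the mask's low bit. */
#define GENMASK64(h, l)		((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FEATURE_MASK(shift)	GENMASK64((shift) + 3, (shift))
#define FIELD_PREP64(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Assumed shifts (they match the defines deleted earlier in this diff). */
#define PFR0_EL0_SHIFT	0
#define PFR0_RAS_SHIFT	28

static const uint64_t restrict_unsigned =
	FIELD_PREP64(FEATURE_MASK(PFR0_EL0_SHIFT), 0x1) |	/* EL0: 64-bit only */
	FIELD_PREP64(FEATURE_MASK(PFR0_RAS_SHIFT), 0x1);	/* RAS: v1 implemented */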
@@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
 	u64 cptr_set = 0;
 
 	/* Protected KVM does not support AArch32 guests. */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
-		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	/*
 	 * Linux guests assume support for floating-point and Advanced SIMD. Do
 	 * not change the trapping behavior for these from the KVM default.
 	 */
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
-				PVM_ID_AA64PFR0_ALLOW));
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
-				PVM_ID_AA64PFR0_ALLOW));
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
+				PVM_ID_AA64PFR0_ALLOW));
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
+				PVM_ID_AA64PFR0_ALLOW));
 
 	/* Trap RAS unless all current versions are supported */
-	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
-	    ID_AA64PFR0_RAS_V1P1) {
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
+	    ID_AA64PFR0_EL1_RAS_V1P1) {
 		hcr_set |= HCR_TERR | HCR_TEA;
 		hcr_clear |= HCR_FIEN;
 	}
 
 	/* Trap AMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
 		hcr_clear |= HCR_AMVOFFEN;
 		cptr_set |= CPTR_EL2_TAM;
 	}
 
 	/* Trap SVE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
 		cptr_set |= CPTR_EL2_TZ;
 
 	vcpu->arch.hcr_el2 |= hcr_set;
@@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
 	u64 hcr_clear = 0;
 
 	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
 		hcr_set |= HCR_TID5;
 		hcr_clear |= HCR_DCT | HCR_ATA;
 	}
@@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
 	u64 cptr_set = 0;
 
 	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
 		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
 			      MDCR_EL2_HPMN_MASK;
 	}
 
 	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
 		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
 
 	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
 		mdcr_set |= MDCR_EL2_TDOSA;
 
 	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPMS;
 		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 	}
 
 	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
 		mdcr_set |= MDCR_EL2_TTRF;
 
 	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
 		cptr_set |= CPTR_EL2_TTA;
 
 	vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
 	u64 mdcr_set = 0;
 
 	/* Trap Debug Communications Channel registers */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
 		mdcr_set |= MDCR_EL2_TDCC;
 
 	vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
 	u64 hcr_set = 0;
 
 	/* Trap LOR */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
 		hcr_set |= HCR_TLOR;
 
 	vcpu->arch.hcr_el2 |= hcr_set;
...
@@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 			PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
 	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
+	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+			       (u64)kvm->arch.pfr0_csv2);
+	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+			       (u64)kvm->arch.pfr0_csv3);
 
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
@@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
 	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
 
 	if (!kvm_has_mte(kvm))
-		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
 
 	return id_aa64pfr1_el1_sys_val & allow_mask;
 }
@@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
 	 * No support for AArch32 guests, therefore, pKVM has no sanitized copy
 	 * of AArch32 feature id registers.
 	 */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
-		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 
 	return pvm_access_raz_wi(vcpu, p, r);
 }
...
@@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data {
 
 static bool kvm_phys_is_valid(u64 phys)
 {
-	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
+	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
 }
 
 static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
...
@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	pmuver = kvm->arch.arm_pmu->pmuver;
 
 	switch (pmuver) {
-	case ID_AA64DFR0_PMUVER_8_0:
+	case ID_AA64DFR0_EL1_PMUVer_IMP:
 		return GENMASK(9, 0);
-	case ID_AA64DFR0_PMUVER_8_1:
-	case ID_AA64DFR0_PMUVER_8_4:
-	case ID_AA64DFR0_PMUVER_8_5:
-	case ID_AA64DFR0_PMUVER_8_7:
+	case ID_AA64DFR0_EL1_PMUVer_V3P1:
+	case ID_AA64DFR0_EL1_PMUVer_V3P4:
+	case ID_AA64DFR0_EL1_PMUVer_V3P5:
+	case ID_AA64DFR0_EL1_PMUVer_V3P7:
 		return GENMASK(15, 0);
 	default:		/* Shouldn't be here, just for sanity */
 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
 	struct arm_pmu_entry *entry;
 
-	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 		return;
 
 	mutex_lock(&arm_pmus_lock);
@@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
 		if (pmu->pmuver == 0 ||
-		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 			pmu = NULL;
 	}
 
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
		 * as RAZ
		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
...
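kvm_pmu_event_mask() picks the event-number width by PMU version: 10 bits for the original PMUv3, 16 bits from PMUv3 for ARMv8.1 onwards, as the switch above shows. A sketch:

#include <stdint.h>

/* Sketch: event-number width by PMU version (encodings as in this diff). */
static uint32_t pmu_event_mask(int pmuver)
{
	if (pmuver == 0x1)		/* PMUVer_IMP: original PMUv3 */
		return (1u << 10) - 1;	/* GENMASK(9, 0) */
	return (1u << 16) - 1;		/* GENMASK(15, 0) for v3.1 and later */
}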
@@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
-				ID_AA64MMFR0_PARANGE_SHIFT);
+				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 	/*
 	 * IPA size beyond 48 bits could not be supported
 	 * on either 4K or 16K page size. Hence let's cap
@@ -367,20 +367,20 @@ int kvm_set_ipa_limit(void)
 	 * on the system.
 	 */
 	if (PAGE_SIZE != SZ_64K)
-		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);
 
 	/*
 	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
 	 * Stage-2. If not, things will stop very quickly.
 	 */
-	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
-	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
+	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
+	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
 		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
 		return -EINVAL;
-	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
+	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
 		break;
-	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
+	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
 		break;
 	default:
...
...@@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, ...@@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r); u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
kvm_inject_undefined(vcpu); kvm_inject_undefined(vcpu);
return false; return false;
} }
...@@ -1076,22 +1076,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r ...@@ -1076,22 +1076,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
switch (id) { switch (id) {
case SYS_ID_AA64PFR0_EL1: case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu)) if (!vcpu_has_sve(vcpu))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
if (kvm_vgic_global_state.type == VGIC_V3) { if (kvm_vgic_global_state.type == VGIC_V3) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1); val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
} }
break; break;
case SYS_ID_AA64PFR1_EL1: case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm)) if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
break; break;
case SYS_ID_AA64ISAR1_EL1: case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu)) if (!vcpu_has_ptrauth(vcpu))
...@@ -1109,14 +1109,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r ...@@ -1109,14 +1109,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
break; break;
case SYS_ID_AA64DFR0_EL1: case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */ /* Limit debug to ARMv8.0 */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER); val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6); val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
/* Limit guests to PMUv3 for ARMv8.4 */ /* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val, val = cpuid_feature_cap_perfmon_field(val,
ID_AA64DFR0_PMUVER_SHIFT, ID_AA64DFR0_EL1_PMUVer_SHIFT,
kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0); kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
/* Hide SPE from guests */ /* Hide SPE from guests */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER); val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
break; break;
case SYS_ID_DFR0_EL1: case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */ /* Limit guests to PMUv3 for ARMv8.4 */
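The ID_AA64DFR0_EL1 handling above follows a single pattern: clear a field, then insert a capped value. A hedged sketch of the PMUVer cap, where pmuver_cap_sketch() is an illustrative wrapper around the cpuid_feature_cap_perfmon_field() helper called above:

/* Sketch: expose at most PMUv3 for ARMv8.4 to the guest, or no PMU
 * at all if the vcpu has none. */
static u64 pmuver_cap_sketch(u64 dfr0, bool vcpu_has_pmu)
{
        return cpuid_feature_cap_perfmon_field(dfr0,
                        ID_AA64DFR0_EL1_PMUVer_SHIFT,
                        vcpu_has_pmu ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
}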
...@@ -1198,21 +1198,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, ...@@ -1198,21 +1198,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
* it doesn't promise more than what is actually provided (the * it doesn't promise more than what is actually provided (the
* guest could otherwise be covered in ectoplasmic residue). * guest could otherwise be covered in ectoplasmic residue).
*/ */
csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT); csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
if (csv2 > 1 || if (csv2 > 1 ||
(csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED)) (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
return -EINVAL; return -EINVAL;
/* Same thing for CSV3 */ /* Same thing for CSV3 */
csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT); csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
if (csv3 > 1 || if (csv3 > 1 ||
(csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED)) (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
return -EINVAL; return -EINVAL;
/* We can only differ with CSV[23], and anything else is an error */ /* We can only differ with CSV[23], and anything else is an error */
val ^= read_id_reg(vcpu, rd); val ^= read_id_reg(vcpu, rd);
val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) | val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
(0xFUL << ID_AA64PFR0_CSV3_SHIFT)); ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
if (val) if (val)
return -EINVAL; return -EINVAL;
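The validation above leans on an XOR trick: XOR-ing the proposed value with the current view leaves 1-bits exactly where the two differ, and after masking off the tolerated CSV2/CSV3 fields, anything non-zero is a disallowed change. A minimal sketch with illustrative names:

/* Sketch of the "only CSV[23] may differ" check in set_id_aa64pfr0_el1(). */
static int pfr0_diff_check_sketch(u64 new_val, u64 cur_val)
{
        u64 diff = new_val ^ cur_val;   /* 1-bits mark differing fields */

        diff &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
                  ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));

        return diff ? -EINVAL : 0;      /* anything else may not change */
}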
...@@ -1814,11 +1814,11 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, ...@@ -1814,11 +1814,11 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
} else { } else {
u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
| (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
return true; return true;
} }
......
...@@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void) ...@@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void)
{ {
u32 asid; u32 asid;
int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1), int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
ID_AA64MMFR0_ASID_SHIFT); ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
switch (fld) { switch (fld) {
default: default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld); smp_processor_id(), fld);
fallthrough; fallthrough;
case ID_AA64MMFR0_ASID_8: case ID_AA64MMFR0_EL1_ASIDBITS_8:
asid = 8; asid = 8;
break; break;
case ID_AA64MMFR0_ASID_16: case ID_AA64MMFR0_EL1_ASIDBITS_16:
asid = 16; asid = 16;
} }
......
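With the generated enumeration constants, the switch above reads as a two-way choice plus a warning for unknown values. A hedged sketch (asid_bits_sketch() is illustrative and, unlike the real code, silently treats unknown encodings as 8-bit):

/* Sketch: ASID width from ID_AA64MMFR0_EL1.ASIDBITS. */
static u32 asid_bits_sketch(u64 mmfr0)
{
        unsigned int fld = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);

        return (fld == ID_AA64MMFR0_EL1_ASIDBITS_16) ? 16 : 8;
}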
...@@ -360,7 +360,7 @@ void __init arm64_memblock_init(void) ...@@ -360,7 +360,7 @@ void __init arm64_memblock_init(void)
extern u16 memstart_offset_seed; extern u16 memstart_offset_seed;
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field( int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_PARANGE_SHIFT); mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size - s64 range = linear_region_size -
BIT(id_aa64mmfr0_parange_to_phys_shift(parange)); BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
......
...@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void) ...@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void)
pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1); pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
return cpuid_feature_extract_unsigned_field(pfr1, return cpuid_feature_extract_unsigned_field(pfr1,
ID_AA64PFR1_BT_SHIFT); ID_AA64PFR1_EL1_BT_SHIFT);
} }
/* /*
......
...@@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup) ...@@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup)
* (ID_AA64PFR1_EL1[11:8] > 1). * (ID_AA64PFR1_EL1[11:8] > 1).
*/ */
mrs x10, ID_AA64PFR1_EL1 mrs x10, ID_AA64PFR1_EL1
ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4 ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
cmp x10, #ID_AA64PFR1_MTE cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2
b.lt 1f b.lt 1f
/* Normal Tagged memory type at the corresponding MAIR index */ /* Normal Tagged memory type at the corresponding MAIR index */
......
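For readers not fluent in the assembly above: __cpu_setup requires ID_AA64PFR1_EL1.MTE >= 2, i.e. at least FEAT_MTE2, before programming the Normal Tagged MAIR index. The same test in C, with an illustrative function name:

/* Sketch: C rendering of the ubfx/cmp/b.lt sequence above. */
static bool mte2_supported_sketch(u64 pfr1)
{
        unsigned int mte = cpuid_feature_extract_unsigned_field(pfr1,
                                ID_AA64PFR1_EL1_MTE_SHIFT);

        return mte >= ID_AA64PFR1_EL1_MTE_MTE2;
}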
...@@ -46,6 +46,127 @@ ...@@ -46,6 +46,127 @@
# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration # feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else. # item ACCDATA) though it may be more tasteful to do something else.
Sysreg ID_AA64PFR0_EL1 3 0 0 4 0
Enum 63:60 CSV3
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CSV2
0b0000 NI
0b0001 IMP
0b0010 CSV2_2
0b0011 CSV2_3
EndEnum
Enum 55:52 RME
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 DIT
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AMU
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 43:40 MPAM
0b0000 0
0b0001 1
EndEnum
Enum 39:36 SEL2
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 SVE
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 RAS
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 27:24 GIC
0b0000 NI
0b0001 IMP
0b0011 V4P1
EndEnum
Enum 23:20 AdvSIMD
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 19:16 FP
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 15:12 EL3
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 11:8 EL2
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 7:4 EL1
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 3:0 EL0
0b0001 IMP
0b0010 AARCH32
EndEnum
EndSysreg
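Each Sysreg/Enum block in this file is turned by arch/arm64/tools/gen-sysreg.awk into shift/mask macros plus one constant per enumeration item, which is where the ID_AA64PFR0_EL1_* names used throughout this commit come from. Illustrative output for the CSV2 field above (the exact generated text may differ in detail):

#define ID_AA64PFR0_EL1_CSV2_SHIFT      56
#define ID_AA64PFR0_EL1_CSV2_MASK       GENMASK_ULL(59, 56)
#define ID_AA64PFR0_EL1_CSV2_NI         UL(0b0000)
#define ID_AA64PFR0_EL1_CSV2_IMP        UL(0b0001)
#define ID_AA64PFR0_EL1_CSV2_CSV2_2     UL(0b0010)
#define ID_AA64PFR0_EL1_CSV2_CSV2_3     UL(0b0011)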
Sysreg ID_AA64PFR1_EL1 3 0 0 4 1
Res0 63:40
Enum 39:36 NMI
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 CSV2_frac
0b0000 NI
0b0001 CSV2_1p1
0b0010 CSV2_1p2
EndEnum
Enum 31:28 RNDR_trap
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SME
0b0000 NI
0b0001 IMP
EndEnum
Res0 23:20
Enum 19:16 MPAM_frac
0b0000 MINOR_0
0b0001 MINOR_1
EndEnum
Enum 15:12 RAS_frac
0b0000 NI
0b0001 RASv1p1
EndEnum
Enum 11:8 MTE
0b0000 NI
0b0001 IMP
0b0010 MTE2
0b0011 MTE3
EndEnum
Enum 7:4 SSBS
0b0000 NI
0b0001 IMP
0b0010 SSBS2
EndEnum
Enum 3:0 BT
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4 Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4
Res0 63:60 Res0 63:60
Enum 59:56 F64MM Enum 59:56 F64MM
...@@ -98,7 +219,9 @@ Enum 63 FA64 ...@@ -98,7 +219,9 @@ Enum 63 FA64
0b1 IMP 0b1 IMP
EndEnum EndEnum
Res0 62:60 Res0 62:60
Field 59:56 SMEver Enum 59:56 SMEver
0b0000 IMP
EndEnum
Enum 55:52 I16I64 Enum 55:52 I16I64
0b0000 NI 0b0000 NI
0b1111 IMP 0b1111 IMP
...@@ -129,6 +252,89 @@ EndEnum ...@@ -129,6 +252,89 @@ EndEnum
Res0 31:0 Res0 31:0
EndSysreg EndSysreg
Sysreg ID_AA64DFR0_EL1 3 0 0 5 0
Enum 63:60 HPMN0
0b0000 UNPREDICTABLE
0b0001 DEF
EndEnum
Res0 59:56
Enum 55:52 BRBE
0b0000 NI
0b0001 IMP
0b0010 BRBE_V1P1
EndEnum
Enum 51:48 MTPMU
0b0000 NI_IMPDEF
0b0001 IMP
0b1111 NI
EndEnum
Enum 47:44 TraceBuffer
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TraceFilt
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 DoubleLock
0b0000 IMP
0b1111 NI
EndEnum
Enum 35:32 PMSVer
0b0000 NI
0b0001 IMP
0b0010 V1P1
0b0011 V1P2
0b0100 V1P3
EndEnum
Field 31:28 CTX_CMPs
Res0 27:24
Field 23:20 WRPs
Res0 19:16
Field 15:12 BRPs
Enum 11:8 PMUVer
0b0000 NI
0b0001 IMP
0b0100 V3P1
0b0101 V3P4
0b0110 V3P5
0b0111 V3P7
0b1000 V3P8
0b1111 IMP_DEF
EndEnum
Enum 7:4 TraceVer
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 DebugVer
0b0110 IMP
0b0111 VHE
0b1000 V8P2
0b1001 V8P4
0b1010 V8P8
EndEnum
EndSysreg
Sysreg ID_AA64DFR1_EL1 3 0 0 5 1
Res0 63:0
EndSysreg
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
Res0 63:32
Field 31:28 IMPDEF7
Field 27:24 IMPDEF6
Field 23:20 IMPDEF5
Field 19:16 IMPDEF4
Field 15:12 IMPDEF3
Field 11:8 IMPDEF2
Field 7:4 IMPDEF1
Field 3:0 IMPDEF0
EndSysreg
Sysreg ID_AA64AFR1_EL1 3 0 0 5 5
Res0 63:0
EndSysreg
Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0 Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0
Enum 63:60 RNDR Enum 63:60 RNDR
0b0000 NI 0b0000 NI
...@@ -313,6 +519,217 @@ Enum 3:0 WFxT ...@@ -313,6 +519,217 @@ Enum 3:0 WFxT
EndEnum EndEnum
EndSysreg EndSysreg
Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0
Enum 63:60 ECV
0b0000 NI
0b0001 IMP
0b0010 CNTPOFF
EndEnum
Enum 59:56 FGT
0b0000 NI
0b0001 IMP
EndEnum
Res0 55:48
Enum 47:44 EXS
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TGRAN4_2
0b0000 TGRAN4
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 39:36 TGRAN64_2
0b0000 TGRAN64
0b0001 NI
0b0010 IMP
EndEnum
Enum 35:32 TGRAN16_2
0b0000 TGRAN16
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 31:28 TGRAN4
0b0000 IMP
0b0001 52_BIT
0b1111 NI
EndEnum
Enum 27:24 TGRAN64
0b0000 IMP
0b1111 NI
EndEnum
Enum 23:20 TGRAN16
0b0000 NI
0b0001 IMP
0b0010 52_BIT
EndEnum
Enum 19:16 BIGENDEL0
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 SNSMEM
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 BIGEND
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 ASIDBITS
0b0000 8
0b0010 16
EndEnum
Enum 3:0 PARANGE
0b0000 32
0b0001 36
0b0010 40
0b0011 42
0b0100 44
0b0101 48
0b0110 52
EndEnum
EndSysreg
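ID_AA64MMFR0_EL1 carries one TGRAN field per granule (TGRAN4/TGRAN16/TGRAN64). Callers such as the EFI stub hunk further down use a granule-independent ID_AA64MMFR0_EL1_TGRAN_SHIFT, which resolves to one of the three according to the configured page size, roughly as sketched here (the real aliases live in asm/sysreg.h and may differ in detail):

#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT    ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT    ID_AA64MMFR0_EL1_TGRAN16_SHIFT
#else /* CONFIG_ARM64_64K_PAGES */
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT    ID_AA64MMFR0_EL1_TGRAN64_SHIFT
#endif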
Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1
Enum 63:60 ECBHB
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CMOW
0b0000 NI
0b0001 IMP
EndEnum
Enum 55:52 TIDCP1
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 nTLBPA
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AFP
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 HCX
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 ETS
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 TWED
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 XNX
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SpecSEI
0b0000 NI
0b0001 IMP
EndEnum
Enum 23:20 PAN
0b0000 NI
0b0001 IMP
0b0010 PAN2
0b0011 PAN3
EndEnum
Enum 19:16 LO
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 HPDS
0b0000 NI
0b0001 IMP
0b0010 HPDS2
EndEnum
Enum 11:8 VH
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 VMIDBits
0b0000 8
0b0010 16
EndEnum
Enum 3:0 HAFDBS
0b0000 NI
0b0001 AF
0b0010 DBM
EndEnum
EndSysreg
Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2
Enum 63:60 E0PD
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 EVT
0b0000 NI
0b0001 IMP
0b0010 TTLBxS
EndEnum
Enum 55:52 BBM
0b0000 0
0b0001 1
0b0010 2
EndEnum
Enum 51:48 TTL
0b0000 NI
0b0001 IMP
EndEnum
Res0 47:44
Enum 43:40 FWB
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 IDS
0b0000 0x0
0b0001 0x18
EndEnum
Enum 35:32 AT
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 ST
0b0000 39
0b0001 48_47
EndEnum
Enum 27:24 NV
0b0000 NI
0b0001 IMP
0b0010 NV2
EndEnum
Enum 23:20 CCIDX
0b0000 32
0b0001 64
EndEnum
Enum 19:16 VARange
0b0000 48
0b0001 52
EndEnum
Enum 15:12 IESB
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 LSM
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 UAO
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 CnP
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0 Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP Field 63 TIDCP
Field 62 SPINMASK Field 62 SPINMASK
...@@ -427,6 +844,12 @@ Sysreg SMCR_EL1 3 0 1 2 6 ...@@ -427,6 +844,12 @@ Sysreg SMCR_EL1 3 0 1 2 6
Fields SMCR_ELx Fields SMCR_ELx
EndSysreg EndSysreg
Sysreg ALLINT 3 0 4 3 0
Res0 63:14
Field 13 ALLINT
Res0 12:0
EndSysreg
Sysreg FAR_EL1 3 0 6 0 0 Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR Field 63:0 ADDR
EndSysreg EndSysreg
...@@ -440,6 +863,14 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1 ...@@ -440,6 +863,14 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1
Fields CONTEXTIDR_ELx Fields CONTEXTIDR_ELx
EndSysreg EndSysreg
Sysreg TPIDR_EL1 3 0 13 0 4
Field 63:0 ThreadID
EndSysreg
Sysreg SCXTNUM_EL1 3 0 13 0 7
Field 63:0 SoftwareContextNumber
EndSysreg
Sysreg CLIDR_EL1 3 1 0 0 1 Sysreg CLIDR_EL1 3 1 0 0 1
Res0 63:47 Res0 63:47
Field 46:33 Ttypen Field 46:33 Ttypen
...@@ -514,6 +945,22 @@ Sysreg ZCR_EL2 3 4 1 2 0 ...@@ -514,6 +945,22 @@ Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx Fields ZCR_ELx
EndSysreg EndSysreg
Sysreg HCRX_EL2 3 4 1 2 2
Res0 63:12
Field 11 MSCEn
Field 10 MCE2
Field 9 CMOW
Field 8 VFNMI
Field 7 VINMI
Field 6 TALLINT
Field 5 SMPME
Field 4 FGTnXS
Field 3 FnXS
Field 2 EnASR
Field 1 EnALS
Field 0 EnAS0
EndSysreg
Sysreg SMPRIMAP_EL2 3 4 1 2 5 Sysreg SMPRIMAP_EL2 3 4 1 2 5
Field 63:60 P15 Field 63:60 P15
Field 59:56 P14 Field 59:56 P14
......
...@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void) ...@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void)
if (IS_ENABLED(CONFIG_ARM64_4K_PAGES)) if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return EFI_SUCCESS; return EFI_SUCCESS;
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
efi_err("This 64 KB granular kernel is not supported by your CPU\n"); efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else else
......
...@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) ...@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
} }
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT); par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par); tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
cd->ttbr = virt_to_phys(mm->pgd); cd->ttbr = virt_to_phys(mm->pgd);
...@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) ...@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
* addresses larger than what we support. * addresses larger than what we support.
*/ */
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT); fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld); oas = id_aa64mmfr0_parange_to_phys_shift(fld);
if (smmu->oas < oas) if (smmu->oas < oas)
return false; return false;
/* We can support bigger ASIDs than the CPU, but not smaller */ /* We can support bigger ASIDs than the CPU, but not smaller */
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT); fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
asid_bits = fld ? 16 : 8; asid_bits = fld ? 16 : 8;
if (smmu->asid_bits < asid_bits) if (smmu->asid_bits < asid_bits)
return false; return false;
......
...@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void) ...@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
{ {
unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT); fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
return fld >= 0x3; return fld >= 0x3;
} }
......
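The 0x3 threshold above corresponds to the V4P1 (GICv4.1) enumeration value of ID_AA64PFR0_EL1.GIC; written with the generated constant, the return would read (an illustrative rewrite, not part of the commit):

        return fld >= ID_AA64PFR0_EL1_GIC_V4P1;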