Commit b8c9592b authored by Ard Biesheuvel's avatar Ard Biesheuvel Committed by Russell King

ARM: 8318/1: treat CPU feature register fields as signed quantities

The various CPU feature registers consist of 4-bit blocks that
represent signed quantities, whose positive values represent
incremental features, and whose negative values are reserved.

To improve forward compatibility, update the feature detection
code to take possible future higher values into account, but
ignore negative values.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent eb765c1c
...@@ -253,4 +253,20 @@ static inline int cpu_is_pj4(void) ...@@ -253,4 +253,20 @@ static inline int cpu_is_pj4(void)
#else #else
#define cpu_is_pj4() 0 #define cpu_is_pj4() 0
#endif #endif
/*
 * Extract a 4-bit field from a CPU feature register, sign-extended.
 *
 * The ARM CPU ID feature registers are divided into 4-bit fields that
 * are architecturally defined as *signed* quantities: values 0..7 denote
 * incrementally added features, while negative values (-8..-1) are
 * reserved.  Returning a signed value lets callers use '>=' tests that
 * remain correct if future architecture revisions define higher values,
 * while reserved (negative) encodings never satisfy such tests.
 */
static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
								  int field)
{
	int feature = (features >> field) & 15;

	/*
	 * Sign-extend the 4-bit field: encodings 8..15 (top bit set) are
	 * two's-complement negatives -8..-1.  Note the threshold must be
	 * 7, not 8: 0b1000 (= 8) encodes -8, and leaving it as +8 would
	 * spuriously pass '>=' feature checks.
	 */
	if (feature > 7)
		feature -= 16;

	return feature;
}

/* Extract a signed feature field from extended CPUID register 'reg'. */
#define cpuid_feature_extract(reg, field) \
	cpuid_feature_extract_field(read_cpuid_ext(reg), field)
#endif #endif
...@@ -375,30 +375,26 @@ void __init early_print(const char *str, ...) ...@@ -375,30 +375,26 @@ void __init early_print(const char *str, ...)
static void __init cpuid_init_hwcaps(void) static void __init cpuid_init_hwcaps(void)
{ {
unsigned int divide_instrs, vmsa; int block;
if (cpu_architecture() < CPU_ARCH_ARMv7) if (cpu_architecture() < CPU_ARCH_ARMv7)
return; return;
divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
if (block >= 2)
switch (divide_instrs) {
case 2:
elf_hwcap |= HWCAP_IDIVA; elf_hwcap |= HWCAP_IDIVA;
case 1: if (block >= 1)
elf_hwcap |= HWCAP_IDIVT; elf_hwcap |= HWCAP_IDIVT;
}
/* LPAE implies atomic ldrd/strd instructions */ /* LPAE implies atomic ldrd/strd instructions */
vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (vmsa >= 5) if (block >= 5)
elf_hwcap |= HWCAP_LPAE; elf_hwcap |= HWCAP_LPAE;
} }
static void __init elf_hwcap_fixup(void) static void __init elf_hwcap_fixup(void)
{ {
unsigned id = read_cpuid_id(); unsigned id = read_cpuid_id();
unsigned sync_prim;
/* /*
* HWCAP_TLS is available only on 1136 r1p0 and later, * HWCAP_TLS is available only on 1136 r1p0 and later,
...@@ -419,9 +415,9 @@ static void __init elf_hwcap_fixup(void) ...@@ -419,9 +415,9 @@ static void __init elf_hwcap_fixup(void)
* avoid advertising SWP; it may not be atomic with * avoid advertising SWP; it may not be atomic with
* multiprocessing cores. * multiprocessing cores.
*/ */
sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) | if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f); (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
if (sync_prim >= 0x13) cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
elf_hwcap &= ~HWCAP_SWP; elf_hwcap &= ~HWCAP_SWP;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment