Commit af040ffc authored by Russell King

ARM: make it easier to check the CPU part number correctly

Ensure that platform maintainers check the CPU part number in the right
manner: the CPU part number is meaningless without also checking the
CPU implement(e|o)r (choose your preferred spelling!). Provide an
interface which returns both the implementer and part number together,
and update the definitions to include the implementer.

Mark the old function as deprecated... indeed, using the old function
with the updated definitions will now always evaluate as false, so
people must update their not-yet-merged code to the new function. While
this could be avoided by adding new definitions, we'd also have to
create new names for them, which would be awkward.

Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ee2593ef
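
For orientation, a minimal sketch (not part of the commit) of the difference
in checking style. read_cpuid_id() returns the MIDR, which carries the
implementer code in bits [31:24] and the part number in bits [15:4]; the
deprecated helper extracted only the part-number field, so a bare part-number
comparison would match any implementer that happens to reuse the same value.
The helper names old_is_a9() and new_is_a9() are hypothetical, for
illustration only:

	#include <asm/cputype.h>

	/* Hypothetical helpers, illustration only. */
	static bool old_is_a9(void)
	{
		/* Compares only MIDR bits [15:4]; any implementer's part
		 * 0xC09 would match, not just ARM Ltd's Cortex-A9. */
		return read_cpuid_part_number() == 0xC090;
	}

	static bool new_is_a9(void)
	{
		/* 0x4100c090 = implementer 0x41 (ARM) in the top byte plus
		 * part 0xC09, compared against MIDR & 0xff00fff0 in one go. */
		return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	}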
@@ -62,17 +62,18 @@
 #define ARM_CPU_IMP_ARM		0x41
 #define ARM_CPU_IMP_INTEL	0x69
 
-#define ARM_CPU_PART_ARM1136	0xB360
-#define ARM_CPU_PART_ARM1156	0xB560
-#define ARM_CPU_PART_ARM1176	0xB760
-#define ARM_CPU_PART_ARM11MPCORE	0xB020
-#define ARM_CPU_PART_CORTEX_A8	0xC080
-#define ARM_CPU_PART_CORTEX_A9	0xC090
-#define ARM_CPU_PART_CORTEX_A5	0xC050
-#define ARM_CPU_PART_CORTEX_A15	0xC0F0
-#define ARM_CPU_PART_CORTEX_A7	0xC070
-#define ARM_CPU_PART_CORTEX_A12	0xC0D0
-#define ARM_CPU_PART_CORTEX_A17	0xC0E0
+/* ARM implemented processors */
+#define ARM_CPU_PART_ARM1136	0x4100b360
+#define ARM_CPU_PART_ARM1156	0x4100b560
+#define ARM_CPU_PART_ARM1176	0x4100b760
+#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
+#define ARM_CPU_PART_CORTEX_A8	0x4100c080
+#define ARM_CPU_PART_CORTEX_A9	0x4100c090
+#define ARM_CPU_PART_CORTEX_A5	0x4100c050
+#define ARM_CPU_PART_CORTEX_A7	0x4100c070
+#define ARM_CPU_PART_CORTEX_A12	0x4100c0d0
+#define ARM_CPU_PART_CORTEX_A17	0x4100c0e0
+#define ARM_CPU_PART_CORTEX_A15	0x4100c0f0
 
 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000
 #define ARM_CPU_XSCALE_ARCH_V1	0x2000
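
Reading the new constants: each one is simply the old 16-bit part value with
the implementer byte (ARM_CPU_IMP_ARM, 0x41) placed in the top byte of the
word, matching the MIDR layout. A hedged compile-time check of that encoding
(MAKE_CPU_PART is a hypothetical macro, not part of this commit):

	/* Hypothetical: rebuild a combined constant from its two fields. */
	#define MAKE_CPU_PART(imp, partnum)	(((imp) << 24) | ((partnum) << 4))

	/* 0x41 = ARM_CPU_IMP_ARM, 0xC09 = Cortex-A9 part number. */
	_Static_assert(MAKE_CPU_PART(0x41, 0xC09) == 0x4100c090,
		       "combined value = implementer byte | part-number field");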
@@ -171,14 +172,24 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 	return (read_cpuid_id() & 0xFF000000) >> 24;
 }
 
-static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+/*
+ * The CPU part number is meaningless without referring to the CPU
+ * implementer: implementers are free to define their own part numbers
+ * which are permitted to clash with other implementer part numbers.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_part(void)
+{
+	return read_cpuid_id() & 0xff00fff0;
+}
+
+static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
 {
 	return read_cpuid_id() & 0xFFF0;
 }
 
 static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
 {
-	return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+	return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
...
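
This hunk is also where the commit message's "always evaluate as false" point
comes from: read_cpuid_part_number() masks with 0xFFF0 and therefore never
exceeds 16 bits, while every updated constant carries a non-zero implementer
byte, so the old comparison can no longer match. Illustrative sketch only
(do_a9_quirk() is a hypothetical callee):

	/* 0xFFF0-masked value vs 0x4100c090: always false now. */
	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
		do_a9_quirk();

Note also why xscale_cpu_arch_version() switches to masking read_cpuid_id()
directly: ARM_CPU_XSCALE_ARCH_MASK (0xe000) lies entirely within the old
part-number field, so the result is unchanged while the deprecated helper is
avoided.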
@@ -11,7 +11,7 @@
 static inline bool scu_a9_has_base(void)
 {
-	return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+	return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
 }
 
 static inline unsigned long scu_a9_get_base(void)
...
@@ -250,15 +250,12 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
 	int cpu = get_cpu();
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
 	int ret = -ENODEV;
 
 	pr_info("probing PMU on CPU %d\n", cpu);
 
+	switch (read_cpuid_part()) {
 	/* ARM Ltd CPUs. */
-	if (implementor == ARM_CPU_IMP_ARM) {
-		switch (part_number) {
 	case ARM_CPU_PART_ARM1136:
 	case ARM_CPU_PART_ARM1156:
 	case ARM_CPU_PART_ARM1176:
@@ -273,9 +270,9 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 	case ARM_CPU_PART_CORTEX_A9:
 		ret = armv7_a9_pmu_init(pmu);
 		break;
-	}
 
-	/* Intel CPUs [xscale]. */
-	} else if (implementor == ARM_CPU_IMP_INTEL) {
+	default:
+		if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
 			switch (xscale_cpu_arch_version()) {
 			case ARM_CPU_XSCALE_ARCH_V1:
 				ret = xscale1pmu_init(pmu);
@@ -285,6 +282,8 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 				break;
 			}
 		}
+		break;
+	}
 
 	put_cpu();
 	return ret;
...
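
A design note on this hunk: switching on the combined value lets every
ARM Ltd part be a plain case label, and all other implementers fall through
to default:, where only Intel needs a second look. The XScale architecture
version occupies bits inside the part-number field (mask 0xe000), so it
cannot be expressed as a single combined-value case per CPU. Hedged sketch
of that shape, using only names defined in the hunks above:

	switch (read_cpuid_part()) {
	case ARM_CPU_PART_CORTEX_A9:	/* ARM Ltd parts match directly */
		ret = armv7_a9_pmu_init(pmu);
		break;
	default:
		if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL &&
		    xscale_cpu_arch_version() == ARM_CPU_XSCALE_ARCH_V1)
			ret = xscale1pmu_init(pmu);
		break;
	}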
@@ -274,13 +274,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 int __attribute_const__ kvm_target_cpu(void)
 {
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
+	switch (read_cpuid_part()) {
 	case ARM_CPU_PART_CORTEX_A7:
 		return KVM_ARM_TARGET_CORTEX_A7;
 	case ARM_CPU_PART_CORTEX_A15:
...
@@ -196,7 +196,7 @@ static void exynos_power_down(void)
 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 		arch_spin_unlock(&exynos_mcpm_lock);
-		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
 			/*
 			 * On the Cortex-A15 we need to disable
 			 * L2 prefetching before flushing the cache.
@@ -291,7 +291,7 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
 static void __init exynos_cache_off(void)
 {
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
 		/* disable L2 prefetching on the Cortex-A15 */
 		asm volatile(
 		"mcr p15, 1, %0, c15, c0, 3\n\t"
...
@@ -188,7 +188,7 @@ static void __init exynos_smp_init_cpus(void)
 	void __iomem *scu_base = scu_base_addr();
 	unsigned int i, ncores;
 
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 	else
 		/*
@@ -214,7 +214,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 	exynos_sysram_init();
 
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 		scu_enable(scu_base_addr());
 
 	/*
...
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
 	tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
 	__raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 		exynos_cpu_save_register();
 
 	return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
 	if (exynos_pm_central_resume())
 		goto early_wakeup;
 
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 		exynos_cpu_restore_register();
 
 	/* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
 
 	s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 		scu_enable(S5P_VA_SCU);
 
 early_wakeup:
@@ -440,15 +440,14 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
 	case CPU_PM_ENTER:
 		if (cpu == 0) {
 			exynos_pm_central_suspend();
-			if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+			if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
 				exynos_cpu_save_register();
 		}
 		break;
 
 	case CPU_PM_EXIT:
 		if (cpu == 0) {
-			if (read_cpuid_part_number() ==
-			    ARM_CPU_PART_CORTEX_A9) {
+			if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
 				scu_enable(S5P_VA_SCU);
 				exynos_cpu_restore_register();
 			}
...
@@ -152,7 +152,7 @@ static void tc2_pm_down(u64 residency)
 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 		arch_spin_unlock(&tc2_pm_lock);
-		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
 			/*
 			 * On the Cortex-A15 we need to disable
 			 * L2 prefetching before flushing the cache.
@@ -326,7 +326,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
 static void __init tc2_cache_off(void)
 {
 	pr_info("TC2: disabling cache during MCPM loopback test\n");
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
 		/* disable L2 prefetching on the Cortex-A15 */
 		asm volatile(
 		"mcr p15, 1, %0, c15, c0, 3 \n\t"
...
@@ -665,7 +665,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 {
 	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
-	bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
 
 	if (rev >= L310_CACHE_ID_RTL_R2P0) {
 		if (cortex_a9) {
...
@@ -250,7 +250,7 @@ static void __init global_timer_of_register(struct device_node *np)
 	 * fire when the timer value is greater than or equal to. In previous
 	 * revisions the comparators fired when the timer value was equal to.
 	 */
-	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
 	    && (read_cpuid_id() & 0xf0000f) < 0x200000) {
 		pr_warn("global-timer: non support for this cpu version.\n");
 		return;
...
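
The second half of that condition decodes the Cortex-A9 revision straight
from the MIDR: the mask 0xf0000f keeps the variant field (bits [23:20]) and
the revision field (bits [3:0]), so a value below 0x200000 means the variant
is below 2, i.e. a part older than r2p0. An equivalent, more explicit form
(gt_a9_before_r2p0 is a hypothetical helper name, not from this commit):

	static bool gt_a9_before_r2p0(void)
	{
		unsigned int variant = (read_cpuid_id() >> 20) & 0xf;	/* the N in rNpM */

		return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9 && variant < 2;
	}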