Commit d13027bb authored by Will Deacon

Revert "arm64: initialize per-cpu offsets earlier"

This reverts commit 353e228e.

Qian Cai reports that TX2 no longer boots with his .config as it appears
that task_cpu() gets instrumented and used before KASAN has been
initialised.

Although Mark has a proposed fix, let's take the safe option of reverting
this for now and sorting it out properly later.

Link: https://lore.kernel.org/r/711bc57a314d8d646b41307008db2845b7537b3d.camel@redhat.com
Reported-by: Qian Cai <cai@redhat.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent a82e4ef0
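
To make the failure mode described above concrete, here is a standalone sketch (ordinary userspace C, not kernel code, and not part of the diff below; every name is illustrative) of the ordering hazard: an instrumented access, standing in for the task_cpu() read of current->cpu, is reached before the sanitizer state that kasan_early_init() would set up exists.

/*
 * Standalone model of the hazard: an instrumented access runs before the
 * sanitizer state it depends on has been initialised. All names here are
 * illustrative; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

static int shadow_ready;	/* stands in for the KASAN shadow mapping */

/* stands in for a compiler-inserted KASAN check on a memory access */
static void check_access(const char *what)
{
	if (!shadow_ready) {
		fprintf(stderr, "KASAN-style check on %s before init\n", what);
		exit(1);
	}
}

/*
 * stands in for task_cpu(current): a read of current->cpu that can be
 * instrumented even though its caller is marked noinstr
 */
static unsigned int model_task_cpu(void)
{
	check_access("current->cpu");
	return 0;
}

/* stands in for init_this_cpu_offset() */
static void model_init_this_cpu_offset(void)
{
	unsigned int cpu = model_task_cpu();

	printf("per-cpu offset set for cpu %u\n", cpu);
}

int main(void)
{
	/* reverted ordering: the per-cpu init runs first and trips the check */
	model_init_this_cpu_offset();

	/* the kasan_early_init() equivalent would only run afterwards */
	shadow_ready = 1;
	return 0;
}

Swapping the two calls in main() models the ordering a reworked patch would have to guarantee in head.S; the revert below simply restores the old init sequence instead.
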
arch/arm64/include/asm/cpu.h
@@ -68,6 +68,4 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 				 struct cpuinfo_arm64 *boot);
 
-void init_this_cpu_offset(void);
-
 #endif /* __ASM_CPU_H */
arch/arm64/kernel/head.S
@@ -448,8 +448,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
-	bl	init_this_cpu_offset
-
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
@@ -756,7 +754,6 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ptrauth_keys_init_cpu	x2, x3, x4, x5
 #endif
 
-	bl	init_this_cpu_offset
 	b	secondary_start_kernel
 SYM_FUNC_END(__secondary_switched)
arch/arm64/kernel/setup.c
@@ -87,6 +87,12 @@ void __init smp_setup_processor_id(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	set_cpu_logical_map(0, mpidr);
 
+	/*
+	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
+	 * using percpu variable early, for example, lockdep will
+	 * access percpu variable inside lock_release
+	 */
+	set_my_cpu_offset(0);
 	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
 		(unsigned long)mpidr, read_cpuid_id());
 }
@@ -276,12 +282,6 @@ u64 cpu_logical_map(int cpu)
 }
 EXPORT_SYMBOL_GPL(cpu_logical_map);
 
-void noinstr init_this_cpu_offset(void)
-{
-	unsigned int cpu = task_cpu(current);
-	set_my_cpu_offset(per_cpu_offset(cpu));
-}
-
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
 	init_mm.start_code = (unsigned long) _text;
arch/arm64/kernel/smp.c
@@ -192,7 +192,10 @@ asmlinkage notrace void secondary_start_kernel(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
 	const struct cpu_operations *ops;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -432,13 +435,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
-	/*
-	 * Now that setup_per_cpu_areas() has allocated the runtime per-cpu
-	 * areas it is only safe to read the CPU0 boot-time area, and we must
-	 * reinitialize the offset to point to the runtime area.
-	 */
-	init_this_cpu_offset();
-
+	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 	cpuinfo_store_boot_cpu();
 
 	/*