Commit 52af9c6c authored by Will Deacon, committed by Russell King

ARM: 6943/1: mm: use TTBR1 instead of reserved context ID

On ARMv7 CPUs that cache first level page table entries (like the
Cortex-A15), using a reserved ASID while changing the TTBR or flushing
the TLB is unsafe.

This is because the CPU may cache the first level entry as the result of
a speculative memory access while the reserved ASID is assigned. After
the process owning the page tables dies, the memory will be reallocated
and may be written with junk values which can be interpreted as global,
valid PTEs by the processor. This will result in the TLB being populated
with bogus global entries.

This patch avoids the use of a reserved context ID in the v7 switch_mm
and ASID rollover code by temporarily using the swapper_pg_dir pointed
at by TTBR1, which contains only global entries that are not tagged
with ASIDs.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
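
Read together, the two hunks below give the following switch ordering. Here is a minimal C-with-inline-asm sketch of that ordering (illustrative only: the function name and the local isb() definition are stand-ins, the real code is the cpu_v7_switch_mm assembly further down, and a privileged ARMv7 context is assumed since CP15 registers are inaccessible from user space):

#define isb()	asm volatile("isb" : : : "memory")	/* local stand-in */

/* Illustrative sketch of the new switch ordering; not the kernel's code. */
static inline void switch_mm_sketch(unsigned long new_pgd, unsigned int new_asid)
{
	unsigned int ttb1;

	/* 1. Park TTBR0 on swapper_pg_dir by copying TTBR1 (c2, c0, 1)
	 *    into TTBR0 (c2, c0, 0): global entries only, no ASID tags. */
	asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
		     "mcr	p15, 0, %0, c2, c0, 0"
		     : "=r" (ttb1));
	isb();

	/* 2. Now safe to install the new ASID: speculative walks can
	 *    only cache global entries, never ASID-tagged ones. */
	asm volatile("mcr	p15, 0, %0, c13, c0, 1" : : "r" (new_asid));
	isb();

	/* 3. Only then point TTBR0 at the new process's page tables. */
	asm volatile("mcr	p15, 0, %0, c2, c0, 0" : : "r" (new_pgd));
	isb();
}

Step 1 is exactly the body of the new flush_context(); steps 2 and 3 are the tail of cpu_v7_switch_mm.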
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
+		     "mcr	p15, 0, %0, c2, c0, 0"
+		     : "=r" (ttb));
 	isb();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
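
flush_context() runs when the ASID space rolls over. Parking TTBR0 on swapper_pg_dir before local_flush_tlb_all() ensures that any speculative walk during the flush can only hit global kernel entries, never the dying process's (possibly freed and reused) tables; keeping the mrc/mcr pair in a single asm volatile also guarantees the TTBR1-to-TTBR0 copy cannot be optimised away.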
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,18 +108,16 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
-	isb
+	mrc	p15, 0, r2, c2, c0, 1		@ load TTB 1
+	mcr	p15, 0, r2, c2, c0, 0		@ into TTB 0
+	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
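
Note the reordering relative to the removed code: TTBR0 is now pointed at the incoming page tables only after the new context ID is installed, whereas the old sequence programmed TTBR0 while the reserved ASID was live, leaving the window for speculative first-level caching described in the commit message. With TTBR0 parked on the global-only swapper tables during the CONTEXTIDR write, that window is closed, and the dsb required by CONFIG_ARM_ERRATA_754322 is retained immediately before the single remaining context ID write.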