Commit 8cc8a324 authored by Marc Zyngier's avatar Marc Zyngier Committed by Will Deacon

arm64: Turn the MMU-on sequence into a macro

Turning the MMU on is a popular sport in the arm64 kernel, and
we do it more than once, or even twice. As we are about to add
even more, let's turn it into a macro.

No expected functional change.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Brazdil <dbrazdil@google.com>
Link: https://lore.kernel.org/r/20210208095732.3267263-4-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent b161f924
...@@ -675,6 +675,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU ...@@ -675,6 +675,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endif .endif
.endm .endm
/*
 * Set SCTLR_EL1 to the passed value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 *
 * \reg: general-purpose register holding the new SCTLR_EL1 value.
 *       The register itself is not clobbered.
 */
.macro set_sctlr_el1, reg
/* Install the new control value; the ISB guarantees it is in effect
 * for all subsequently fetched instructions. */
msr sctlr_el1, \reg
isb
/*
 * Invalidate the local I-cache so that any instructions fetched
 * speculatively from the PoC are discarded, since they may have
 * been dynamically patched at the PoU.
 */
ic iallu
/* DSB NSH waits for the invalidation to complete on this PE;
 * the final ISB flushes the pipeline so no stale instructions
 * fetched before the invalidation can execute afterwards. */
dsb nsh
isb
.endm
/* /*
* Check whether to yield to another runnable task from kernel mode NEON code * Check whether to yield to another runnable task from kernel mode NEON code
* (which runs with preemption disabled). * (which runs with preemption disabled).
......
...@@ -703,16 +703,9 @@ SYM_FUNC_START(__enable_mmu) ...@@ -703,16 +703,9 @@ SYM_FUNC_START(__enable_mmu)
offset_ttbr1 x1, x3 offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1 msr ttbr1_el1, x1 // load TTBR1
isb isb
msr sctlr_el1, x0
isb set_sctlr_el1 x0
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively from the PoC are discarded, since they may have
* been dynamically patched at the PoU.
*/
ic iallu
dsb nsh
isb
ret ret
SYM_FUNC_END(__enable_mmu) SYM_FUNC_END(__enable_mmu)
...@@ -883,11 +876,7 @@ SYM_FUNC_START_LOCAL(__primary_switch) ...@@ -883,11 +876,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
tlbi vmalle1 // Remove any stale TLB entries tlbi vmalle1 // Remove any stale TLB entries
dsb nsh dsb nsh
msr sctlr_el1, x19 // re-enable the MMU set_sctlr_el1 x19 // re-enable the MMU
isb
ic iallu // flush instructions fetched
dsb nsh // via old mapping
isb
bl __relocate_kernel bl __relocate_kernel
#endif #endif
......
...@@ -291,17 +291,7 @@ skip_pgd: ...@@ -291,17 +291,7 @@ skip_pgd:
/* We're done: fire up the MMU again */ /* We're done: fire up the MMU again */
mrs x17, sctlr_el1 mrs x17, sctlr_el1
orr x17, x17, #SCTLR_ELx_M orr x17, x17, #SCTLR_ELx_M
msr sctlr_el1, x17 set_sctlr_el1 x17
isb
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively from the PoC are discarded, since they may have
* been dynamically patched at the PoU.
*/
ic iallu
dsb nsh
isb
/* Set the flag to zero to indicate that we're all done */ /* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr] str wzr, [flag_ptr]
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.