Commit 5dfe9d7d authored by Ard Biesheuvel, committed by Catalin Marinas

arm64: reduce ID map to a single page

Commit ea8c2e11 ("arm64: Extend the idmap to the whole kernel
image") changed the early page table code so that the entire kernel
Image is covered by the identity map. This allows functions that
need to enable or disable the MMU to reside anywhere in the kernel
Image.

However, this change has the unfortunate side effect that the Image
cannot cross a physical 512 MB alignment boundary anymore, since the
early page table code cannot deal with the Image crossing a /virtual/
512 MB alignment boundary.

So instead, reduce the ID map to a single page, which is populated by
the contents of the .idmap.text section. Only three functions reside
there at the moment: __enable_mmu(), cpu_resume_mmu() and cpu_reset().
If new code is introduced that needs to manipulate the MMU state, it
should be added to this section as well.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 61bd93ce
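
The commit message asks that any future code manipulating the MMU state be added to the .idmap.text section as well. The diff below shows the pattern used for __enable_mmu() and cpu_resume_mmu(); as a minimal, hypothetical sketch of how a later routine could be marked the same way (the function name and body are illustrative only, not part of this patch, and assume the usual ENTRY/ENDPROC macros from <linux/linkage.h>):

	.pushsection	".idmap.text", "ax"	// place the code in the ID-mapped page
ENTRY(__hypothetical_mmu_off)			// illustrative name only
	mrs	x0, sctlr_el1
	mov	x1, #1				// SCTLR_EL1.M is bit 0
	bic	x0, x0, x1			// clear the MMU enable bit
	msr	sctlr_el1, x0
	isb					// complete the MMU disable before continuing
	ret
ENDPROC(__hypothetical_mmu_off)
	.popsection				// back to the regular .text section

Because the ID map is now limited to a single 4 KB page, the linker ASSERT added at the end of this patch will catch the case where too much code accumulates in the section.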
arch/arm64/kernel/head.S

@@ -361,7 +361,7 @@ __create_page_tables:
 	 * Create the identity mapping.
 	 */
 	mov	x0, x25				// idmap_pg_dir
-	adrp	x3, KERNEL_START		// __pa(KERNEL_START)
+	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
 
 #ifndef CONFIG_ARM64_VA_BITS_48
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -384,11 +384,11 @@ __create_page_tables:
 
 	/*
 	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
-	 * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
 	 * this number conveniently equals the number of leading zeroes in
-	 * the physical address of KERNEL_END.
+	 * the physical address of __idmap_text_end.
 	 */
-	adrp	x5, KERNEL_END
+	adrp	x5, __idmap_text_end
 	clz	x5, x5
 	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
 	b.ge	1f			// .. then skip additional level
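
To make the calculation above concrete (numbers are illustrative only): if __idmap_text_end landed at physical address 0x80_0001_0000, 40 bits are needed to address it, so clz returns 24. With VA_BITS = 39 the default T0SZ is 64 - 39 = 25, the b.ge branch is not taken (24 < 25), and T0SZ is lowered to 24, which forces the additional translation level that the (elided) code between this hunk and the next sets up. If the ID map text lies within the default 39-bit range, clz returns at least 25 and the extra level is skipped.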
@@ -403,8 +403,8 @@ __create_page_tables:
 #endif
 
 	create_pgd_entry x0, x3, x5, x6
-	mov	x5, x3				// __pa(KERNEL_START)
-	adr_l	x6, KERNEL_END			// __pa(KERNEL_END)
+	mov	x5, x3				// __pa(__idmap_text_start)
+	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
 	create_block_map x0, x7, x3, x5, x6
 
 	/*
@@ -632,6 +632,7 @@ ENDPROC(__secondary_switched)
  *
  * other registers depend on the function called upon completion
  */
+	.section	".idmap.text", "ax"
 __enable_mmu:
 	ldr	x5, =vectors
 	msr	vbar_el1, x5
arch/arm64/kernel/sleep.S

@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
 	/*
 	 * x0 must contain the sctlr value retrieved from restored context
 	 */
+	.pushsection	".idmap.text", "ax"
 ENTRY(cpu_resume_mmu)
 	ldr	x3, =cpu_resume_after_mmu
 	msr	sctlr_el1, x0		// restore sctlr_el1
 	isb
 	br	x3			// global jump to virtual address
 ENDPROC(cpu_resume_mmu)
+	.popsection
 cpu_resume_after_mmu:
 	mov	x0, #0			// return zero on success
 	ldp	x19, x20, [sp, #16]
arch/arm64/kernel/vmlinux.lds.S

@@ -38,6 +38,12 @@ jiffies = jiffies_64;
 	*(.hyp.text)					\
 	VMLINUX_SYMBOL(__hyp_text_end) = .;
 
+#define IDMAP_TEXT					\
+	. = ALIGN(SZ_4K);				\
+	VMLINUX_SYMBOL(__idmap_text_start) = .;		\
+	*(.idmap.text)					\
+	VMLINUX_SYMBOL(__idmap_text_end) = .;
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
 			SCHED_TEXT
 			LOCK_TEXT
 			HYPERVISOR_TEXT
+			IDMAP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
 }
 
 /*
- * The HYP init code can't be more than a page long,
+ * The HYP init code and ID map text can't be longer than a page each,
  * and should not cross a page boundary.
  */
 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 	"HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+	"ID map text too big or misaligned")
 
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.