Commit 18b9c0d6 authored by Ard Biesheuvel, committed by Will Deacon

arm64: don't map TEXT_OFFSET bytes below the kernel if we can avoid it

For historical reasons, the kernel Image must be loaded into physical
memory at a 512 KB offset above a 2 MB aligned base address. The region
between the base address and the start of the kernel Image has no
significance to the kernel itself, but it is currently mapped explicitly
into the early kernel VMA range for all translation granules.
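
For illustration (the base address is an example value, not something
taken from this patch): with a 2 MB aligned base of 0x80000000 and the
512 KB (0x80000) TEXT_OFFSET, the Image itself starts at 0x80080000,
and the 512 KB region from 0x80000000 up to 0x80080000 is the hole in
question.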

In some cases (i.e., 4 KB granule), this is unavoidable, due to the 2 MB
granularity of the early kernel mappings. However, in other cases, e.g.,
when running with larger page sizes, or in the future, with more granular
KASLR, there is no reason to map it explicitly like we do currently.

So update the logic so that the region is mapped only if that happens as
a side effect of rounding the start address of the kernel down to the
swapper block size, and leave it unmapped otherwise.
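
To make the rounding concrete, the user-space sketch below (an
illustration only, not kernel code) computes where the early mapping
would start for two block sizes. The load base, and the assumption that
a 64 KB granule kernel maps the Image at 64 KB (page) granularity, are
example values rather than something taken from this patch:

#include <stdio.h>

#define TEXT_OFFSET	0x80000UL			/* 512 KB */

/* Round x down to a power-of-two alignment. */
static unsigned long round_down(unsigned long x, unsigned long align)
{
	return x & ~(align - 1UL);
}

static void show_map_start(const char *granule, unsigned long swapper_block_size)
{
	unsigned long base  = 0x80000000UL;		/* assumed 2 MB aligned load base */
	unsigned long text  = base + TEXT_OFFSET;	/* start of the kernel Image */
	unsigned long start = round_down(text, swapper_block_size);

	printf("%s granule: mapping starts at %#lx, %lu KB below _text\n",
	       granule, start, (text - start) / 1024);
}

int main(void)
{
	show_map_start("4 KB",  0x200000UL);	/* 2 MB blocks: the hole is mapped anyway */
	show_map_start("64 KB", 0x10000UL);	/* 64 KB pages: the hole stays unmapped */
	return 0;
}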

Since the symbol kernel_img_size now simply resolves to the memory
footprint of the kernel Image, we can drop its definition from image.h
and open-code its calculation.
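
For reference, a rough C-level gloss of the open-coded calculation (a
paraphrase of what the new head.S sequence computes; the function and
parameter names are illustrative, not kernel symbols):

/*
 * End of the early kernel mapping, now derived from the runtime
 * addresses of _text and _end instead of the kernel_img_size symbol.
 */
unsigned long early_map_end(unsigned long va_text,	/* __va(_text), incl. KASLR offset */
			    unsigned long pa_text,	/* runtime __pa(_text) */
			    unsigned long pa_end)	/* runtime __pa(_end)  */
{
	/* pa_end - pa_text is the memory footprint of the Image */
	return va_text + (pa_end - pa_text);		/* runtime __va(_end) */
}
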
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent b03cc885
@@ -393,12 +393,13 @@ __create_page_tables:
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
 	mov	x0, x26				// swapper_pg_dir
-	mov_q	x5, KIMAGE_VADDR
+	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
-	ldr	w6, =kernel_img_size
-	add	x6, x6, x5
-	mov	x3, x24				// phys offset
+	adrp	x6, _end			// runtime __pa(_end)
+	adrp	x3, _text			// runtime __pa(_text)
+	sub	x6, x6, x3			// _end - _text
+	add	x6, x6, x5			// runtime __va(_end)
 	create_block_map x0, x7, x3, x5, x6
 
 	/*
...

@@ -71,8 +71,6 @@
 	DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET);	\
 	DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
 
-kernel_img_size = _end - (_text - TEXT_OFFSET);
-
 #ifdef CONFIG_EFI
 __efistub_stext_offset = stext - _text;
...