Commit 2b5548b6 authored by Jun Yao, committed by Catalin Marinas

arm64/mm: Separate boot-time page tables from swapper_pg_dir

Since the address of swapper_pg_dir is fixed for a given kernel image,
it is an attractive target for manipulation via an arbitrary write. To
mitigate this we'd like to make it read-only by moving it into the
rodata section.

We require that swapper_pg_dir is at a fixed offset from tramp_pg_dir
and reserved_ttbr0, so these will also need to move into rodata.
However, swapper_pg_dir is allocated along with some transient page
tables used for boot which we do not want to move into rodata.

As a step towards this, this patch separates the boot-time page tables
into a new init_pg_dir, and reduces swapper_pg_dir to the single page it
needs to be. This allows us to retain the relationship between
swapper_pg_dir, tramp_pg_dir, and reserved_ttbr0, while cleanly
separating these from the boot-time page tables.
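
Concretely, the symbols end up looking like this on the C side (collected from the asm/pgtable.h and asm/mmu.h hunks below): init_mm initially points at init_pg_dir, because mm/init-mm.c picks up the arch-provided INIT_MM_CONTEXT() initializer, and paging_init() later repoints it at swapper_pg_dir. The comments are added here for explanation only:

	/* Boot-time page tables, placed after BSS and freed in paging_init(). */
	extern pgd_t init_pg_dir[PTRS_PER_PGD];
	extern pgd_t init_pg_end[];

	/* swapper_pg_dir shrinks to a single page; its swapper_pg_end extern is dropped. */
	extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

	/* Make init_mm start out on the boot-time tables. */
	#define INIT_MM_CONTEXT(name)	\
		.pgd = init_pg_dir,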

The init_pg_dir holds all of the pgd/pud/pmd/pte levels needed during
boot, and all of these levels will be freed when we switch to the
swapper_pg_dir, which is initialized by the existing code in
paging_init(). Since we start off on the init_pg_dir, we no longer need
to allocate a transient page table in paging_init() in order to ensure
that swapper_pg_dir isn't live while we initialize it.
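
In concrete terms, the new paging_init() (see the arch/arm64/mm/mmu.c hunk at the end of the diff) reduces to the following; the comments are added here for explanation and are not part of the patch:

	void __init paging_init(void)
	{
		/*
		 * We are still running on init_pg_dir here, so swapper_pg_dir
		 * is not live and can be populated directly.
		 */
		map_kernel(swapper_pg_dir);
		map_mem(swapper_pg_dir);

		/* Switch TTBR1 over to the final tables ... */
		cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
		init_mm.pgd = swapper_pg_dir;

		/* ... and hand the boot-time tables back to memblock. */
		memblock_free(__pa_symbol(init_pg_dir),
			      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
	}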

There should be no functional change as a result of this patch.
Signed-off-by: Jun Yao <yaojun8558363@gmail.com>
Reviewed-by: James Morse <james.morse@arm.com>
[Mark: place init_pg_dir after BSS, fold mm changes, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 693d5639
arch/arm64/include/asm/kernel-pgtable.h

@@ -97,7 +97,7 @@
 			+ EARLY_PGDS((vstart), (vend)) 	/* each PGDIR needs a next level page table */	\
 			+ EARLY_PUDS((vstart), (vend))	/* each PUD needs a next level page table */	\
 			+ EARLY_PMDS((vstart), (vend)))	/* each PMD needs a next level page table */
-#define SWAPPER_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
arch/arm64/include/asm/mmu.h

@@ -95,5 +95,8 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 extern void mark_linear_text_alias_ro(void);
 
+#define INIT_MM_CONTEXT(name)	\
+	.pgd = init_pg_dir,
+
 #endif	/* !__ASSEMBLY__ */
 #endif
arch/arm64/include/asm/pgtable.h

@@ -718,8 +718,9 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 }
 #endif
 
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pgd_t swapper_pg_end[];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 
arch/arm64/kernel/head.S

@@ -291,7 +291,7 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_end
+	adrp	x1, init_pg_end
 	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
@@ -299,7 +299,7 @@ __create_page_tables:
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_end
+	adrp	x1, init_pg_end
 	sub	x1, x1, x0
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16

@@ -373,7 +373,7 @@ __create_page_tables:
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
-	adrp	x0, swapper_pg_dir
+	adrp	x0, init_pg_dir
 	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	mov	x4, PTRS_PER_PGD

@@ -390,7 +390,7 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_end
+	adrp	x1, init_pg_end
 	sub	x1, x1, x0
 	dmb	sy
 	bl	__inval_dcache_area

@@ -824,7 +824,7 @@ __primary_switch:
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
 #endif
 
-	adrp	x1, swapper_pg_dir
+	adrp	x1, init_pg_dir
 	bl	__enable_mmu
 #ifdef CONFIG_RELOCATABLE
 	bl	__relocate_kernel
arch/arm64/kernel/vmlinux.lds.S

@@ -229,9 +229,13 @@ SECTIONS
 	. += RESERVED_TTBR0_SIZE;
 #endif
 	swapper_pg_dir = .;
-	. += SWAPPER_DIR_SIZE;
+	. += PAGE_SIZE;
 	swapper_pg_end = .;
 
+	init_pg_dir = .;
+	. += INIT_DIR_SIZE;
+	init_pg_end = .;
+
 	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
 	_end = .;
 
arch/arm64/mm/mmu.c

@@ -629,34 +629,14 @@ static void __init map_kernel(pgd_t *pgdp)
  */
 void __init paging_init(void)
 {
-	phys_addr_t pgd_phys = early_pgtable_alloc();
-	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
-
-	map_kernel(pgdp);
-	map_mem(pgdp);
-
-	/*
-	 * We want to reuse the original swapper_pg_dir so we don't have to
-	 * communicate the new address to non-coherent secondaries in
-	 * secondary_entry, and so cpu_switch_mm can generate the address with
-	 * adrp+add rather than a load from some global variable.
-	 *
-	 * To do this we need to go via a temporary pgd.
-	 */
-	cpu_replace_ttbr1(__va(pgd_phys));
-	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
+	map_kernel(swapper_pg_dir);
+	map_mem(swapper_pg_dir);
+
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+	init_mm.pgd = swapper_pg_dir;
 
-	pgd_clear_fixmap();
-	memblock_free(pgd_phys, PAGE_SIZE);
-
-	/*
-	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
-	 * allocated with it.
-	 */
-	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
-		      __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
-		      - PAGE_SIZE);
+	memblock_free(__pa_symbol(init_pg_dir),
+		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
 }
 
 /*