Commit 3df19111 authored by Michael Ellerman

Merge branch 'topic/kaslr-book3e32' into next

This is a slight rebase of Scott's next branch, which contained the
KASLR support for book3e 32-bit, to squash in a couple of small fixes.

See the original pull request:
  https://lore.kernel.org/r/20191022232155.GA26174@home.buserror.net
parents 565f9bc0 c2d1a135
@@ -19,6 +19,7 @@ powerpc
firmware-assisted-dump
hvcs
isa-versions
kaslr-booke32
mpc52xx
pci_iov_resource_on_powernv
pmu-ebb
.. SPDX-License-Identifier: GPL-2.0
===========================
KASLR for Freescale BookE32
===========================
KASLR stands for Kernel Address Space Layout Randomization. This document
explains the implementation of KASLR for Freescale BookE32. KASLR is a
security feature that deters exploit attempts relying on knowledge of the
location of kernel internals.
Since CONFIG_RELOCATABLE is already supported, all we need to do is map or
copy the kernel to a suitable place and then relocate it. Freescale Book-E
parts expect lowmem to be mapped by fixed TLB entries (TLB1). The TLB1
entries are not suitable for mapping the kernel directly at a randomized
address, so we instead copy the kernel to the chosen location and restart
in order to relocate.
Entropy is derived from the kernel banner and the timer base, which change
with every build and boot. This is not particularly strong on its own, so
the bootloader may additionally pass entropy via the /chosen/kaslr-seed
node in the device tree.
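As an illustration only (this is not the kernel's implementation, and
mix64()/pick_kaslr_seed() are made-up names), the sources above could be
folded into a single seed roughly like this::

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Rotate-and-xor the bytes of buf into seed. */
    static uint64_t mix64(uint64_t seed, const void *buf, size_t len)
    {
            const uint8_t *p = buf;

            while (len--)
                    seed = (seed << 7 | seed >> 57) ^ *p++;
            return seed;
    }

    static uint64_t pick_kaslr_seed(const char *banner, uint64_t timebase,
                                    uint64_t dt_seed)
    {
            uint64_t seed = 0;

            seed = mix64(seed, banner, strlen(banner));      /* changes per build */
            seed = mix64(seed, &timebase, sizeof(timebase)); /* changes per boot  */
            if (dt_seed)  /* /chosen/kaslr-seed from the bootloader, if present */
                    seed = mix64(seed, &dt_seed, sizeof(dt_seed));
            return seed;
    }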
We use the first 512M of lowmem to randomize the kernel image. The memory
is split into 64M zones. The lower 8 bits of the entropy select the index
of the 64M zone, and a 16K-aligned offset inside that zone is then chosen
as the place to put the kernel::
    KERNELBASE
        |-->   64M   <--|
        |               |
        +---------------+    +----------------+---------------+
        |               |....|    |kernel|    |               |
        +---------------+    +----------------+---------------+
        |                         |
        |----->   offset    <-----|
                              kernstart_virt_addr
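The selection above can be sketched in plain C as follows; this is an
illustration under assumptions (a fixed KERNELBASE of 0xc0000000, an image
smaller than 64M, and the made-up helper name pick_kernstart_virt_addr()),
not the kernel's actual code::

    #include <stdint.h>

    #define SZ_64M     0x04000000UL
    #define SZ_16K     0x00004000UL
    #define KERNELBASE 0xc0000000UL   /* assumed PPC32 default */

    /* Derive a randomized kernstart_virt_addr from the seed.  Assumes the
     * kernel image fits inside a single 64M zone. */
    static unsigned long pick_kernstart_virt_addr(uint64_t seed,
                                                  unsigned long kernel_size)
    {
            /* The first 512M of lowmem holds 512M / 64M = 8 zones; the
             * low 8 bits of the seed pick one of them. */
            unsigned long zone = (seed & 0xff) % 8;
            /* Then a 16K-aligned offset inside that zone, leaving room
             * for the image itself. */
            unsigned long slots = (SZ_64M - kernel_size) / SZ_16K;
            unsigned long offset = ((seed >> 8) % slots) * SZ_16K;

            return KERNELBASE + zone * SZ_64M + offset;
    }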
To enable KASLR, set CONFIG_RANDOMIZE_BASE=y. If KASLR is enabled and you
want to disable it at runtime, add "nokaslr" to the kernel command line.
@@ -551,6 +551,17 @@ config RELOCATABLE
setting can still be useful to bootwrappers that need to know the
load address of the kernel (eg. u-boot/mkimage).
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
depends on (FSL_BOOKE && FLATMEM && PPC32)
depends on RELOCATABLE
help
Randomizes the virtual address at which the kernel image is
loaded, as a security feature that deters exploit attempts
relying on knowledge of the location of kernel internals.
If unsure, say Y.
config RELOCATABLE_TEST
bool "Test relocatable kernel"
depends on (PPC64 && RELOCATABLE)
@@ -75,7 +75,6 @@
#define MAS2_E 0x00000001
#define MAS2_WIMGE_MASK 0x0000001f
#define MAS2_EPN_MASK(size) (~0 << (size + 10))
#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
#define MAS3_RPN 0xFFFFF000
#define MAS3_U0 0x00000200
@@ -221,6 +220,16 @@
#define TLBILX_T_CLASS2 6
#define TLBILX_T_CLASS3 7
/*
* The mapping only needs to be cache-coherent on SMP, except on
* Freescale e500mc derivatives where it's also needed for coherent DMA.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define MAS2_M_IF_NEEDED MAS2_M
#else
#define MAS2_M_IF_NEEDED 0
#endif
#ifndef __ASSEMBLY__
#include <asm/bug.h>
@@ -325,6 +325,13 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
extern unsigned long kernstart_virt_addr;
static inline unsigned long kaslr_offset(void)
{
return kernstart_virt_addr - KERNELBASE;
}
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>
@@ -19,9 +19,12 @@
*/
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
unsigned long kva, offset = reloc_offset();
kva = *PTRRELOC(&kernstart_virt_addr);
/* First zero the BSS */
if (kva == KERNELBASE)
memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
/*
@@ -32,5 +35,5 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
apply_feature_fixups();
return KERNELBASE + offset;
return kva + offset;
}
@@ -1346,16 +1346,6 @@ skpinv: addi r6,r6,1 /* Increment */
sync
isync
/*
* The mapping only needs to be cache-coherent on SMP, except on
* Freescale e500mc derivatives where it's also needed for coherent DMA.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED MAS2_M
#else
#define M_IF_NEEDED 0
#endif
/* 6. Setup KERNELBASE mapping in TLB[0]
*
* r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
@@ -1368,7 +1358,7 @@ skpinv: addi r6,r6,1 /* Increment */
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
mtspr SPRN_MAS1,r6
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
mtspr SPRN_MAS2,r6
rlwinm r5,r5,0,0,25
@@ -153,35 +153,24 @@ skpinv: addi r6,r6,1 /* Increment */
tlbivax 0,r9
TLBSYNC
/*
* The mapping only needs to be cache-coherent on SMP, except on
* Freescale e500mc derivatives where it's also needed for coherent DMA.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED MAS2_M
#else
#define M_IF_NEEDED 0
#endif
#if defined(ENTRY_MAPPING_BOOT_SETUP)
/* 6. Setup KERNELBASE mapping in TLB1[0] */
/* 6. Setup kernstart_virt_addr mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@h
ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@l
lis r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
ori r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
and r6,r6,r20
ori r6,r6,MAS2_M_IF_NEEDED@l
mtspr SPRN_MAS2,r6
mtspr SPRN_MAS3,r8
tlbwe
/* 7. Jump to KERNELBASE mapping */
lis r6,(KERNELBASE & ~0xfff)@h
ori r6,r6,(KERNELBASE & ~0xfff)@l
rlwinm r7,r25,0,0x03ffffff
add r6,r7,r6
/* 7. Jump to kernstart_virt_addr mapping */
mr r6,r20
#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
/*
@@ -155,6 +155,8 @@ _ENTRY(_start);
*/
_ENTRY(__early_start)
LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
lwz r20,0(r20)
#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
@@ -277,8 +279,8 @@ set_ivor:
ori r6, r6, swapper_pg_dir@l
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
lis r4, KERNELBASE@h
ori r4, r4, KERNELBASE@l
lis r3, kernstart_virt_addr@ha
lwz r4, kernstart_virt_addr@l(r3)
stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
stw r6, 0(r5)
@@ -1067,7 +1069,12 @@ __secondary_start:
mr r5,r25 /* phys kernel start */
rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
subf r4,r5,r4 /* memstart_addr - phys kernel start */
li r5,0 /* no device tree */
lis r7,KERNELBASE@h
ori r7,r7,KERNELBASE@l
cmpw r20,r7 /* if kernstart_virt_addr != KERNELBASE, randomized */
beq 2f
li r4,0
2: li r5,0 /* no device tree */
li r6,0 /* not boot cpu */
bl restore_to_as0
@@ -1114,6 +1121,54 @@ __secondary_hold_acknowledge:
.long -1
#endif
/*
* Create a 64M tlb by address and entry
* r3 - entry
* r4 - virtual address
* r5/r6 - physical address
*/
_GLOBAL(create_kaslr_tlb_entry)
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
mtspr SPRN_MAS0,r7 /* Write MAS0 */
lis r3,(MAS1_VALID|MAS1_IPROT)@h
ori r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
mtspr SPRN_MAS1,r3 /* Write MAS1 */
lis r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
ori r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
and r3,r3,r4
ori r3,r3,MAS2_M_IF_NEEDED@l
mtspr SPRN_MAS2,r3 /* Write MAS2(EPN) */
#ifdef CONFIG_PHYS_64BIT
ori r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX)
mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */
mtspr SPRN_MAS7,r5
#else
ori r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX)
mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */
#endif
tlbwe /* Write TLB */
isync
sync
blr
/*
* Return to the start of the relocated kernel and run again
* r3 - virtual address of fdt
* r4 - entry of the kernel
*/
_GLOBAL(reloc_kernel_entry)
mfmsr r7
rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS)
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r7
rfi
/*
* Create a tlb entry with the same effective and physical address as
* the tlb entry used by the current running code. But set the TS to 1.
@@ -86,6 +86,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
/*
@@ -330,18 +330,13 @@ kexec_create_tlb:
rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r9) */
/* Set up a temp identity mapping v:0 to p:0 and return to it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED MAS2_M
#else
#define M_IF_NEEDED 0
#endif
mtspr SPRN_MAS0,r9
lis r9,(MAS1_VALID|MAS1_IPROT)@h
ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
mtspr SPRN_MAS1,r9
LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
mtspr SPRN_MAS2,r9
LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
@@ -715,8 +715,28 @@ static struct notifier_block ppc_panic_block = {
.priority = INT_MIN /* may not return; must be done last */
};
/*
* Dump out kernel offset information on panic.
*/
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
kaslr_offset(), KERNELBASE);
return 0;
}
static struct notifier_block kernel_offset_notifier = {
.notifier_call = dump_kernel_offset
};
void __init setup_panic(void)
{
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
/* PPC64 always does a hard irq disable in its panic handler */
if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
return;
@@ -21,6 +21,13 @@
#include <asm/pgtable.h>
#include <asm/kup.h>
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr __ro_after_init;
EXPORT_SYMBOL_GPL(kernstart_addr);
unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
EXPORT_SYMBOL_GPL(kernstart_virt_addr);
static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
@@ -56,11 +56,6 @@
phys_addr_t total_memory;
phys_addr_t total_lowmem;
phys_addr_t memstart_addr = (phys_addr_t)~0ull;
EXPORT_SYMBOL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL(kernstart_addr);
#ifdef CONFIG_RELOCATABLE
/* Used in __va()/__pa() */
long long virt_phys_offset;
@@ -63,11 +63,6 @@
#include <mm/mmu_decl.h>
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Given an address within the vmemmap, determine the page that
@@ -139,10 +139,21 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
extern void adjust_total_lowmem(void);
extern int switch_to_as1(void);
extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
void create_kaslr_tlb_entry(int entry, unsigned long virt, phys_addr_t phys);
void reloc_kernel_entry(void *fdt, int addr);
extern int is_second_reloc;
#endif
extern void loadcam_entry(unsigned int index);
extern void loadcam_multi(int first_idx, int num, int tmp_idx);
#ifdef CONFIG_RANDOMIZE_BASE
void kaslr_early_init(void *dt_ptr, phys_addr_t size);
void kaslr_late_init(void);
#else
static inline void kaslr_early_init(void *dt_ptr, phys_addr_t size) {}
static inline void kaslr_late_init(void) {}
#endif
struct tlbcam {
u32 MAS0;
u32 MAS1;
@@ -8,6 +8,7 @@ obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o
endif
@@ -263,11 +263,13 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
int __initdata is_second_reloc;
notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
{
unsigned long base = KERNELBASE;
unsigned long base = kernstart_virt_addr;
phys_addr_t size;
kernstart_addr = start;
if (is_second_reloc) {
virt_phys_offset = PAGE_OFFSET - memstart_addr;
kaslr_late_init();
return;
}
@@ -291,7 +293,7 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
start &= ~0x3ffffff;
base &= ~0x3ffffff;
virt_phys_offset = base - start;
early_get_first_memblock_info(__va(dt_ptr), NULL);
early_get_first_memblock_info(__va(dt_ptr), &size);
/*
* We now get the memstart_addr, then we should check if this
* address is the same as what the PAGE_OFFSET map to now. If
@@ -316,6 +318,8 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
/* We should never reach here */
panic("Relocation error");
}
kaslr_early_init(__va(dt_ptr), size);
}
#endif
#endif