Commit 557b1970 authored by Vasily Gorbik, committed by Heiko Carstens

s390/kasan: move shadow mapping to decompressor

Since regular paging structs are already initialized in the decompressor,
move the KASAN shadow mapping to the decompressor as well. This avoids
allocating the memory KASAN requires in one large chunk, de-duplicates the
paging struct creation code, and starts the uncompressed kernel with KASAN
instrumentation right away. It also avoids the pitfalls of accidentally
calling KASAN-instrumented code during KASAN initialization.
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent e4c31004
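Before the diff: the __sha() macro used throughout the boot code below converts a kernel address into its KASAN shadow address via kasan_mem_to_shadow(). As a reminder of what that arithmetic does, here is a minimal sketch assuming the generic KASAN scheme of one shadow byte per eight bytes of address space; the helper name is illustrative, not a kernel symbol:

#define KASAN_SHADOW_SCALE_SHIFT 3

/* shadow = (addr >> 3) + KASAN_SHADOW_OFFSET: one shadow byte tracks 8 bytes */
static inline unsigned long mem_to_shadow(unsigned long addr, unsigned long shadow_offset)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + shadow_offset;
}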
@@ -32,6 +32,13 @@ struct vmlinux_info {
unsigned long init_mm_off;
unsigned long swapper_pg_dir_off;
unsigned long invalid_pg_dir_off;
#ifdef CONFIG_KASAN
unsigned long kasan_early_shadow_page_off;
unsigned long kasan_early_shadow_pte_off;
unsigned long kasan_early_shadow_pmd_off;
unsigned long kasan_early_shadow_pud_off;
unsigned long kasan_early_shadow_p4d_off;
#endif
};
void startup_kernel(void);
...
@@ -266,6 +266,13 @@ static void offset_vmlinux_info(unsigned long offset)
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
vmlinux.kasan_early_shadow_page_off += offset;
vmlinux.kasan_early_shadow_pte_off += offset;
vmlinux.kasan_early_shadow_pmd_off += offset;
vmlinux.kasan_early_shadow_pud_off += offset;
vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}
void startup_kernel(void)
@@ -307,10 +314,6 @@ void startup_kernel(void)
detect_physmem_online_ranges(max_physmem_end);
save_ipl_cert_comp_list();
rescue_initrd(safe_addr, ident_map_size);
#ifdef CONFIG_KASAN
physmem_alloc_top_down(RR_KASAN, kasan_estimate_memory_needs(get_physmem_usable_total()),
_SEGMENT_SIZE);
#endif
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
random_lma = get_random_base();
...
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
@@ -16,6 +17,182 @@ unsigned long __bootdata_preserved(s390_invalid_asce);
#define swapper_pg_dir vmlinux.swapper_pg_dir_off
#define invalid_pg_dir vmlinux.invalid_pg_dir_off
enum populate_mode {
POPULATE_NONE,
POPULATE_ONE2ONE,
POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
POPULATE_KASAN_MAP_SHADOW,
POPULATE_KASAN_ZERO_SHADOW,
POPULATE_KASAN_SHALLOW
#endif
};
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
#ifdef CONFIG_KASAN
#define kasan_early_shadow_page vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte ((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd ((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud ((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d ((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
static pte_t pte_z;
static void kasan_populate_shadow(void)
{
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
unsigned long untracked_end;
unsigned long start, end;
int i;
pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
if (!machine.has_nx)
pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
/*
* Current memory layout:
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan |
* | | / | zero page |
* +- vmalloc area -+ / | mapping |
* | vmalloc_size | / | (untracked) |
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ | unmapped | allocated per module
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*
* Current memory layout (KASAN_VMALLOC):
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan zero page| (untracked)
* | | / | mapping |
* +- vmalloc area -+ / +----------------+
* | vmalloc_size | / |shallow populate|
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ |shallow populate|
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*/
for_each_physmem_usable_range(i, &start, &end)
pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
} else {
untracked_end = MODULES_VADDR;
}
/* populate kasan shadow for untracked memory */
pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
}
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
return true;
}
return false;
}
static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
return true;
}
return false;
}
static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
return true;
}
return false;
}
static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
if (mode == POPULATE_KASAN_ZERO_SHADOW &&
IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
return true;
}
return false;
}
static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
pte_t entry;
if (mode == POPULATE_KASAN_ZERO_SHADOW) {
set_pte(pte, pte_z);
return true;
}
return false;
}
#else
static inline void kasan_populate_shadow(void) {}
static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
unsigned long end, enum populate_mode mode)
{
return false;
}
static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
return false;
}
#endif
/*
* Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
*/
@@ -24,12 +201,6 @@ static inline pte_t *__virt_to_kpte(unsigned long va)
return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}
enum populate_mode {
POPULATE_NONE,
POPULATE_ONE2ONE,
POPULATE_ABS_LOWCORE,
};
static void *boot_crst_alloc(unsigned long val)
{
unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
@@ -42,14 +213,26 @@ static void *boot_crst_alloc(unsigned long val)
static pte_t *boot_pte_alloc(void)
{
static void *pte_leftover;
pte_t *pte;
pte = (pte_t *)physmem_alloc_top_down(RR_VMEM, _PAGE_TABLE_SIZE, _PAGE_TABLE_SIZE);
/*
* handling pte_leftovers this way helps to avoid memory fragmentation
* during POPULATE_KASAN_MAP_SHADOW when EDAT is off
*/
if (!pte_leftover) {
pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
} else {
pte = pte_leftover;
pte_leftover = NULL;
}
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
static unsigned long _pa(unsigned long addr, enum populate_mode mode)
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
switch (mode) {
case POPULATE_NONE:
@@ -58,6 +241,12 @@ static unsigned long _pa(unsigned long addr, enum populate_mode mode)
return addr;
case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
case POPULATE_KASAN_MAP_SHADOW:
addr = physmem_alloc_top_down(RR_VMEM, size, size);
memset((void *)addr, 0, size);
return addr;
#endif
default:
return -1;
}
@@ -83,7 +272,9 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
pte = pte_offset_kernel(pmd, addr);
for (; addr < end; addr += PAGE_SIZE, pte++) {
if (pte_none(*pte)) {
entry = __pte(_pa(addr, mode));
if (kasan_pte_populate_zero_shadow(pte, mode))
continue;
entry = __pte(_pa(addr, PAGE_SIZE, mode));
entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
set_pte(pte, entry);
}
@@ -101,8 +292,10 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
for (; addr < end; addr = next, pmd++) {
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
continue;
if (can_large_pmd(pmd, addr, next)) {
entry = __pmd(_pa(addr, mode));
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
set_pmd(pmd, entry);
continue;
@@ -127,8 +320,10 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
for (; addr < end; addr = next, pud++) {
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
continue;
if (can_large_pud(pud, addr, next)) {
entry = __pud(_pa(addr, mode));
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
set_pud(pud, entry);
continue;
@@ -153,6 +348,8 @@ static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long e
for (; addr < end; addr = next, p4d++) {
next = p4d_addr_end(addr, end);
if (p4d_none(*p4d)) {
if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
continue;
pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
p4d_populate(&init_mm, p4d, pud);
}
@@ -170,9 +367,15 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
for (; addr < end; addr = next, pgd++) {
next = pgd_addr_end(addr, end);
if (pgd_none(*pgd)) {
if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
continue;
p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
pgd_populate(&init_mm, pgd, p4d);
}
#ifdef CONFIG_KASAN
if (mode == POPULATE_KASAN_SHALLOW)
continue;
#endif
pgtable_p4d_populate(pgd, addr, next, mode);
}
}
@@ -210,6 +413,8 @@ void setup_vmem(unsigned long asce_limit)
POPULATE_NONE);
memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);
kasan_populate_shadow();
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
...
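A note on the boot_pte_alloc() change above: on s390 a page table (_PAGE_TABLE_SIZE, 2 KiB) is half of a 4 KiB page, so the function now allocates a whole page and hands out its two halves on consecutive calls instead of requesting 2 KiB from the top-down allocator twice. A standalone sketch of that leftover-halving pattern, with a user-space stand-in for the allocator (names are illustrative, not the kernel API):

#include <stdlib.h>

#define SKETCH_PAGE_SIZE       4096	/* assumed 4 KiB pages */
#define SKETCH_PAGE_TABLE_SIZE 2048	/* assumed 2 KiB per s390 PTE table */

/*
 * Every other call is satisfied from the second half of the previously
 * allocated page, so interleaving with other top-down allocations cannot
 * fragment memory while the shadow is populated table by table.
 */
static void *pte_alloc_halved(void)
{
	static void *leftover;
	void *pte;

	if (!leftover) {
		leftover = aligned_alloc(SKETCH_PAGE_SIZE, SKETCH_PAGE_SIZE);
		if (!leftover)
			return NULL;
		pte = (char *)leftover + SKETCH_PAGE_TABLE_SIZE;	/* hand out the upper half */
	} else {
		pte = leftover;						/* hand out the saved lower half */
		leftover = NULL;
	}
	return pte;
}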
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
#include <asm/pgtable.h>
#include <linux/const.h>
#ifdef CONFIG_KASAN
@@ -13,35 +13,6 @@
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
extern void kasan_early_init(void);
/*
* Estimate kasan memory requirements, which it will reserve
* at the very end of available physical memory. To estimate
* that, we take into account that kasan would require
* 1/8 of available physical memory (for shadow memory) +
* creating page tables for the shadow memory region.
* To keep page tables estimates simple take the double of
* combined ptes size.
*
* physmem parameter has to be already adjusted if not entire physical memory
* would be used (e.g. due to effect of "mem=" option).
*/
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
{
unsigned long kasan_needs;
unsigned long pages;
/* for shadow memory */
kasan_needs = round_up(physmem / 8, PAGE_SIZE);
/* for paging structures */
pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
return kasan_needs;
}
#else
static inline void kasan_early_init(void) { }
static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
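For context on the reservation this commit removes: the deleted kasan_estimate_memory_needs() sized the single up-front chunk as 1/8 of usable memory plus a doubled page-table margin. A small user-space sketch of that arithmetic, with assumed stand-ins for the s390 constants (4 KiB pages, 2 KiB page tables, 256 entries per table):

#include <stdio.h>

#define SK_PAGE_SIZE        4096UL
#define SK_PAGE_TABLE_SIZE  2048UL
#define SK_PAGE_ENTRIES      256UL

static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

/* Mirrors the removed estimate: shadow = physmem / 8, plus twice the PTE tables needed to map it. */
static unsigned long kasan_estimate(unsigned long physmem)
{
	unsigned long shadow = div_round_up(physmem / 8, SK_PAGE_SIZE) * SK_PAGE_SIZE;
	unsigned long pages = div_round_up(shadow, SK_PAGE_SIZE);

	return shadow + div_round_up(pages, SK_PAGE_ENTRIES) * SK_PAGE_TABLE_SIZE * 2;
}

int main(void)
{
	/* 8 GiB of usable memory -> 1 GiB of shadow + 4 MiB of page tables = 1028 MiB. */
	printf("%lu MiB\n", kasan_estimate(8UL << 30) >> 20);
	return 0;
}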
@@ -26,9 +26,6 @@ enum reserved_range_type {
RR_CERT_COMP_LIST,
RR_MEM_DETECT_EXTENDED,
RR_VMEM,
#ifdef CONFIG_KASAN
RR_KASAN,
#endif
RR_MAX
};
@@ -129,9 +126,6 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
RR_TYPE_NAME(CERT_COMP_LIST);
RR_TYPE_NAME(MEM_DETECT_EXTENDED);
RR_TYPE_NAME(VMEM);
#ifdef CONFIG_KASAN
RR_TYPE_NAME(KASAN);
#endif
default:
return "UNKNOWN";
}
@@ -166,17 +160,6 @@ static inline struct reserved_range *__physmem_reserved_next(enum reserved_range
range; range = __physmem_reserved_next(&t, range), \
*p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
static inline unsigned long get_physmem_usable_total(void)
{
unsigned long start, end, total = 0;
int i;
for_each_physmem_usable_range(i, &start, &end)
total += end - start;
return total;
}
static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
unsigned long *addr, unsigned long *size)
{
...
@@ -51,6 +51,14 @@ decompressor_handled_param(nokaslr);
decompressor_handled_param(prot_virt);
#endif
static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
sclp_early_printk("KernelAddressSanitizer initialized\n");
#endif
}
static void __init reset_tod_clock(void)
{
union tod_clock clk;
@@ -293,6 +301,7 @@ static void __init sort_amode31_extable(void)
void __init startup_init(void)
{
kasan_early_init();
reset_tod_clock();
time_early_init();
init_kernel_storage_key();
...
@@ -26,9 +26,6 @@ ENTRY(startup_continue)
stg %r14,__LC_CURRENT
larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
brasl %r14,sclp_early_adjust_va # allow sclp_early_printk
#ifdef CONFIG_KASAN
brasl %r14,kasan_early_init
#endif
brasl %r14,startup_init # s390 specific early init
brasl %r14,start_kernel # common init code
#
...
@@ -219,6 +219,13 @@ SECTIONS
QUAD(init_mm)
QUAD(swapper_pg_dir)
QUAD(invalid_pg_dir)
#ifdef CONFIG_KASAN
QUAD(kasan_early_shadow_page)
QUAD(kasan_early_shadow_pte)
QUAD(kasan_early_shadow_pmd)
QUAD(kasan_early_shadow_pud)
QUAD(kasan_early_shadow_p4d)
#endif
} :NONE
/* Debugging sections. */
...
@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
KASAN_SANITIZE_kasan_init.o := n
obj-$(CONFIG_KASAN) += kasan_init.o
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/physmem_info.h>
#include <asm/processor.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/sclp.h>
static unsigned long pgalloc_pos __initdata;
static unsigned long segment_pos __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
static void __init kasan_early_panic(const char *reason)
{
sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
sclp_early_printk(reason);
disabled_wait();
}
static void * __init kasan_early_alloc_segment(void)
{
unsigned long addr = segment_pos;
segment_pos += _SEGMENT_SIZE;
if (segment_pos > pgalloc_pos)
kasan_early_panic("out of memory during initialisation\n");
return __va(addr);
}
static void * __init kasan_early_alloc_pages(unsigned int order)
{
pgalloc_pos -= (PAGE_SIZE << order);
if (segment_pos > pgalloc_pos)
kasan_early_panic("out of memory during initialisation\n");
return __va(pgalloc_pos);
}
static void * __init kasan_early_crst_alloc(unsigned long val)
{
unsigned long *table;
table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
if (table)
crst_table_init(table, val);
return table;
}
static pte_t * __init kasan_early_pte_alloc(void)
{
static void *pte_leftover;
pte_t *pte;
BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
if (!pte_leftover) {
pte_leftover = kasan_early_alloc_pages(0);
pte = pte_leftover + _PAGE_TABLE_SIZE;
} else {
pte = pte_leftover;
pte_leftover = NULL;
}
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
enum populate_mode {
POPULATE_MAP,
POPULATE_ZERO_SHADOW,
POPULATE_SHALLOW
};
static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
{
return __pgprot(pgprot_val(pgprot) & ~bit);
}
static void __init kasan_early_pgtable_populate(unsigned long address,
unsigned long end,
enum populate_mode mode)
{
pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
pgprot_t pgt_prot = PAGE_KERNEL;
pgprot_t sgt_prot = SEGMENT_KERNEL;
pgd_t *pg_dir;
p4d_t *p4_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pmd_t pmd;
pte_t pte;
if (!has_nx) {
pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
}
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PGDIR_SIZE) &&
end - address >= PGDIR_SIZE) {
pgd_populate(&init_mm, pg_dir,
kasan_early_shadow_p4d);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
continue;
}
p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
pgd_populate(&init_mm, pg_dir, p4_dir);
}
if (mode == POPULATE_SHALLOW) {
address = (address + P4D_SIZE) & P4D_MASK;
continue;
}
p4_dir = p4d_offset(pg_dir, address);
if (p4d_none(*p4_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, P4D_SIZE) &&
end - address >= P4D_SIZE) {
p4d_populate(&init_mm, p4_dir,
kasan_early_shadow_pud);
address = (address + P4D_SIZE) & P4D_MASK;
continue;
}
pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
p4d_populate(&init_mm, p4_dir, pu_dir);
}
pu_dir = pud_offset(p4_dir, address);
if (pud_none(*pu_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PUD_SIZE) &&
end - address >= PUD_SIZE) {
pud_populate(&init_mm, pu_dir,
kasan_early_shadow_pmd);
address = (address + PUD_SIZE) & PUD_MASK;
continue;
}
pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
if (IS_ALIGNED(address, PMD_SIZE) &&
end - address >= PMD_SIZE) {
if (mode == POPULATE_ZERO_SHADOW) {
pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
} else if (has_edat) {
void *page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
pmd = __pmd(__pa(page));
pmd = set_pmd_bit(pmd, sgt_prot);
set_pmd(pm_dir, pmd);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
}
pt_dir = kasan_early_pte_alloc();
pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) {
void *page;
switch (mode) {
case POPULATE_MAP:
page = kasan_early_alloc_pages(0);
memset(page, 0, PAGE_SIZE);
pte = __pte(__pa(page));
pte = set_pte_bit(pte, pgt_prot);
set_pte(pt_dir, pte);
break;
case POPULATE_ZERO_SHADOW:
page = kasan_early_shadow_page;
pte = __pte(__pa(page));
pte = set_pte_bit(pte, pgt_prot_zero);
set_pte(pt_dir, pte);
break;
case POPULATE_SHALLOW:
/* should never happen */
break;
}
}
address += PAGE_SIZE;
}
}
static void __init kasan_early_detect_facilities(void)
{
if (test_facility(8)) {
has_edat = true;
__ctl_set_bit(0, 23);
}
if (!noexec_disabled && test_facility(130)) {
has_nx = true;
__ctl_set_bit(0, 20);
}
}
void __init kasan_early_init(void)
{
pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
unsigned long pgalloc_pos_initial, segment_pos_initial;
unsigned long untracked_end = MODULES_VADDR;
unsigned long start, end;
int i;
kasan_early_detect_facilities();
if (!has_nx)
pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
/* init kasan zero shadow */
crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
/* segment allocations go bottom up -> <- pgalloc go top down */
segment_pos_initial = physmem_info.reserved[RR_KASAN].start;
segment_pos = segment_pos_initial;
pgalloc_pos_initial = physmem_info.reserved[RR_KASAN].end;
pgalloc_pos = pgalloc_pos_initial;
/*
* Current memory layout:
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan |
* | | / | zero page |
* +- vmalloc area -+ / | mapping |
* | vmalloc_size | / | (untracked) |
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ | unmapped | allocated per module
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*
* Current memory layout (KASAN_VMALLOC):
* +- 0 -------------+ +- shadow start -+
* |1:1 ident mapping| /|1/8 of ident map|
* | | / | |
* +-end of ident map+ / +----------------+
* | ... gap ... | / | kasan zero page| (untracked)
* | | / | mapping |
* +- vmalloc area -+ / +----------------+
* | vmalloc_size | / |shallow populate|
* +- modules vaddr -+ / +----------------+
* | 2Gb |/ |shallow populate|
* +- shadow start -+ +----------------+
* | 1/8 addr space | | zero pg mapping| (untracked)
* +- shadow end ----+---------+- shadow end ---+
*/
/* populate kasan shadow (for identity mapping and zero page mapping) */
for_each_physmem_usable_range(i, &start, &end)
kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
POPULATE_SHALLOW);
}
/* populate kasan shadow for untracked memory */
kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
POPULATE_ZERO_SHADOW);
kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
POPULATE_ZERO_SHADOW);
/* enable kasan */
init_task.kasan_depth = 0;
sclp_early_printk("KernelAddressSanitizer initialized\n");
memblock_reserve(segment_pos_initial, segment_pos - segment_pos_initial);
memblock_reserve(pgalloc_pos, pgalloc_pos_initial - pgalloc_pos);
}
@@ -5,6 +5,7 @@
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -664,6 +665,9 @@ static void __init memblock_region_swap(void *a, void *b, int size)
swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
}
#ifdef CONFIG_KASAN
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
#endif
/*
* map whole physical memory to virtual memory (identity mapping)
* we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -733,6 +737,13 @@ void __init vmem_map_init(void)
SET_MEMORY_RW | SET_MEMORY_NX);
}
#ifdef CONFIG_KASAN
for_each_mem_range(i, &base, &end)
__set_memory(__sha(base),
(__sha(end) - __sha(base)) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
#endif
__set_memory((unsigned long)_stext,
(unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
...