// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

15 16 17 18
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
19 20 21 22 23 24 25

#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
26
	disabled_wait();
27 28
}

29 30 31 32 33 34 35
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

36
	return __va(segment_pos);
37 38
}

39 40 41 42 43 44 45
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

46
	return __va(pgalloc_pos);
47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

enum populate_mode {
	POPULATE_MAP,
79 80
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
81
};
82 83 84 85 86 87

static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
{
	return __pgprot(pgprot_val(pgprot) & ~bit);
}

88
static void __init kasan_early_pgtable_populate(unsigned long address,
89 90 91
						unsigned long end,
						enum populate_mode mode)
{
92 93 94
	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
	pgprot_t pgt_prot = PAGE_KERNEL;
	pgprot_t sgt_prot = SEGMENT_KERNEL;
95 96 97 98 99
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
100 101
	pmd_t pmd;
	pte_t pte;
102

103
	if (!has_nx) {
104 105 106
		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
107
	}
108 109 110 111 112 113 114

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
115 116
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
117 118 119 120 121 122 123
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

124
		if (mode == POPULATE_SHALLOW) {
125 126 127 128
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

129 130 131 132 133
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
134 135
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
136 137 138 139 140 141 142 143 144 145 146 147
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
148 149
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
150 151 152 153 154 155 156 157 158
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
159
			if (IS_ALIGNED(address, PMD_SIZE) &&
160
			    end - address >= PMD_SIZE) {
161 162 163 164
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
165 166
				} else if (has_edat) {
					void *page = kasan_early_alloc_segment();
167

168
					memset(page, 0, _SEGMENT_SIZE);
169 170 171
					pmd = __pmd(__pa(page));
					pmd = set_pmd_bit(pmd, sgt_prot);
					set_pmd(pm_dir, pmd);
172 173
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
174 175
				}
			}
176 177
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
178 179 180
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
181 182 183 184 185 186 187 188 189 190
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
191 192 193
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot);
				set_pte(pt_dir, pte);
194 195
				break;
			case POPULATE_ZERO_SHADOW:
196
				page = kasan_early_shadow_page;
197 198 199
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot_zero);
				set_pte(pt_dir, pte);
200
				break;
201 202 203
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
204 205 206 207 208 209
			}
		}
		address += PAGE_SIZE;
	}
}

210 211 212 213 214 215 216 217 218 219 220 221
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

222 223
void __init kasan_early_init(void)
{
224
	pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
225 226 227
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
228
	unsigned long untracked_end = MODULES_VADDR;
229
	unsigned long shadow_alloc_size;
230 231
	unsigned long start, end;
	int i;
232

233 234
	kasan_early_detect_facilities();
	if (!has_nx)
235
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
236

237 238
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
239 240

	/* init kasan zero shadow */
241 242 243
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
244
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
245

246
	if (has_edat) {
247
		shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
248
		segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
249
		segment_low = segment_pos - shadow_alloc_size;
250
		segment_low = round_down(segment_low, _SEGMENT_SIZE);
251 252
		pgalloc_pos = segment_low;
	}
253 254
	/*
	 * Current memory layout:
255 256 257 258 259 260 261 262 263 264 265 266 267
	 * +- 0 -------------+	       +- shadow start -+
	 * |1:1 ident mapping|	      /|1/8 of ident map|
	 * |		     |	     / |		|
	 * +-end of ident map+	    /  +----------------+
	 * | ... gap ...     |	   /   |    kasan	|
	 * |		     |	  /    |  zero page	|
	 * +- vmalloc area  -+	 /     |   mapping	|
	 * | vmalloc_size    |	/      | (untracked)	|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb	     |/        |    unmapped	| allocated per module
	 * +- shadow start  -+	       +----------------+
	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
268 269
	 *
	 * Current memory layout (KASAN_VMALLOC):
270 271 272 273 274 275 276 277 278 279 280 281 282
	 * +- 0 -------------+	       +- shadow start -+
	 * |1:1 ident mapping|	      /|1/8 of ident map|
	 * |		     |	     / |		|
	 * +-end of ident map+	    /  +----------------+
	 * | ... gap ...     |	   /   | kasan zero page| (untracked)
	 * |		     |	  /    | mapping	|
	 * +- vmalloc area  -+	 /     +----------------+
	 * | vmalloc_size    |	/      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb	     |/        |shallow populate|
	 * +- shadow start  -+	       +----------------+
	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
283
	 */
284
	/* populate kasan shadow (for identity mapping and zero page mapping) */
285
	for_each_mem_detect_usable_block(i, &start, &end)
286
		kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
287
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
288
		untracked_end = VMALLOC_START;
289
		/* shallowly populate kasan shadow for vmalloc and modules */
290
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
291
					     POPULATE_SHALLOW);
292 293
	}
	/* populate kasan shadow for untracked memory */
294
	kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
295
				     POPULATE_ZERO_SHADOW);
296
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
297
				     POPULATE_ZERO_SHADOW);
298 299 300 301
	/* enable kasan */
	init_task.kasan_depth = 0;
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}