Commit 917538e2 authored by Andrey Konovalov, committed by Linus Torvalds

kasan: clean up KASAN_SHADOW_SCALE_SHIFT usage

Right now, the fact that KASAN uses a single shadow byte for every 8 bytes of
memory is scattered all over the code.

This change defines KASAN_SHADOW_SCALE_SHIFT early in the asm include files
and uses that constant wherever the scale factor is needed.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/34937ca3b90736eaad91b568edf5684091f662e3.1515775666.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5f21f3a8
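The mapping being parameterized here: one shadow byte describes
1 << KASAN_SHADOW_SCALE_SHIFT = 8 bytes of kernel memory, so translating an
address to its shadow is a right shift plus a fixed offset. A minimal
userspace sketch of that translation (the offset constant is a stand-in for
illustration; the real value comes from the arch headers changed below):

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
/* Stand-in offset for illustration only; the kernel derives the real
 * value from CONFIG_KASAN_SHADOW_OFFSET / the arch headers. */
#define SHADOW_OFFSET 0xdffffc0000000000ULL

static uint64_t mem_to_shadow(uint64_t addr)
{
	/* One shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT (= 8)
	 * bytes of memory: scale by a right shift, then relocate. */
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	uint64_t a = 0xffff880000000000ULL;

	printf("shadow(%#llx) = %#llx\n", (unsigned long long)a,
	       (unsigned long long)mem_to_shadow(a));
	/* All 8 addresses of a granule map to the same shadow byte. */
	printf("same shadow byte: %s\n",
	       mem_to_shadow(a) == mem_to_shadow(a + 7) ? "yes" : "no");
	return 0;
}

Because the low three bits are shifted away, all eight addresses of a granule
land on the same shadow byte, which is what the second printf demonstrates.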
@@ -12,7 +12,8 @@
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
- * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
  */
 #define KASAN_SHADOW_START      (VA_START)
 #define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
@@ -20,14 +21,16 @@
 /*
  * This value is used to map an address to the corresponding shadow
  * address by the following formula:
- *     shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *     shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
  *
- * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
- * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
- * should satisfy the following equation:
- *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ *              (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
  */
-#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
+#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << \
+                                        (64 - KASAN_SHADOW_SCALE_SHIFT)))

 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
...
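As a worked check of the offset equation above, assuming the arm64
VA_BITS = 48 layout with VA_START = 0xffff000000000000 (values taken from
that configuration, not stated in this diff):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Assumed layout: VA_BITS = 48, VA_START = 0xffff000000000000. */
	uint64_t va_start      = 0xffff000000000000ULL;
	uint64_t shadow_size   = 1ULL << (48 - 3); /* VA_BITS - SCALE_SHIFT */
	uint64_t shadow_end    = va_start + shadow_size;
	uint64_t shadow_offset = shadow_end - (1ULL << (64 - 3));

	/* The highest possible address, -1, must map to the last shadow
	 * byte, i.e. just below shadow_end. */
	uint64_t top = (UINT64_MAX >> 3) + shadow_offset;
	assert(top == shadow_end - 1);
	return 0;
}

With these numbers shadow_end is 0xffff200000000000 and shadow_offset
0xdfff200000000000, so the shadow of the entire 64-bit space really does end
exactly at KASAN_SHADOW_END.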
@@ -85,7 +85,8 @@
  * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE       (UL(1) << (VA_BITS - 3))
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_SIZE       (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_THREAD_SHIFT      1
 #else
 #define KASAN_SHADOW_SIZE       (0)
...
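For scale: with VA_BITS = 48, KASAN_SHADOW_SIZE works out to 1 << 45 bytes =
32 TiB, one eighth of the 256 TiB kernel half of the address space.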
@@ -135,7 +135,8 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
 /* The early shadow maps everything to a single page of zeroes */
 asmlinkage void __init kasan_early_init(void)
 {
-	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
...
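The BUILD_BUG_ON turns the same offset/end relationship into a build-time
guarantee. Outside the kernel, an equivalent guard can be sketched with C11
_Static_assert (the constants below are placeholders matching the VA_BITS = 48
arithmetic shown earlier):

#define SCALE_SHIFT	3
/* Placeholder values standing in for the arch-specific constants;
 * SHADOW_OFFSET is written as a literal, the way an independently
 * configured value would arrive, so the check is not a tautology. */
#define SHADOW_END	0xffff200000000000ULL
#define SHADOW_OFFSET	0xdfff200000000000ULL

/* Build fails if the literal ever drifts out of sync with the formula. */
_Static_assert(SHADOW_OFFSET ==
	       SHADOW_END - (1ULL << (64 - SCALE_SHIFT)),
	       "KASAN shadow offset inconsistent with shadow end");

int main(void)
{
	return 0;
}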
@@ -4,6 +4,7 @@

 #include <linux/const.h>
 #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_SCALE_SHIFT 3

 /*
  * Compiler uses shadow offset assuming that addresses start
@@ -12,12 +13,15 @@
  * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT
  */
 #define KASAN_SHADOW_START      (KASAN_SHADOW_OFFSET + \
-                                        ((-1UL << __VIRTUAL_MASK_SHIFT) >> 3))
+                                        ((-1UL << __VIRTUAL_MASK_SHIFT) >> \
+                                                KASAN_SHADOW_SCALE_SHIFT))
 /*
- * 47 bits for kernel address -> (47 - 3) bits for shadow
- * 56 bits for kernel address -> (56 - 3) bits for shadow
+ * 47 bits for kernel address -> (47 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow
+ * 56 bits for kernel address -> (56 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow
  */
-#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1ULL << (__VIRTUAL_MASK_SHIFT - 3)))
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + \
+                                        (1ULL << (__VIRTUAL_MASK_SHIFT - \
+                                                KASAN_SHADOW_SCALE_SHIFT)))

 #ifndef __ASSEMBLY__
...
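The two comment lines above pin down how much shadow each paging mode needs;
a standalone sketch of that arithmetic (47 and 56 are the 4-level and 5-level
values of __VIRTUAL_MASK_SHIFT):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 4-level paging gives 47 kernel address bits, 5-level gives 56. */
	int shifts[] = { 47, 56 };

	for (int i = 0; i < 2; i++) {
		uint64_t span = 1ULL << (shifts[i] - 3);
		printf("%2d address bits -> %llu TiB of shadow\n",
		       shifts[i], (unsigned long long)(span >> 40));
	}
	return 0;
}

That is 16 TiB of shadow for 4-level paging and 8192 TiB for 5-level.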
@@ -11,8 +11,6 @@ struct task_struct;

 #ifdef CONFIG_KASAN

-#define KASAN_SHADOW_SCALE_SHIFT 3
-
 #include <asm/kasan.h>
 #include <asm/pgtable.h>
...