Commit 0fea6e9a authored by Andrey Konovalov, committed by Linus Torvalds

kasan, arm64: expand CONFIG_KASAN checks

Some #ifdef CONFIG_KASAN checks are only relevant for software KASAN modes
(either related to shadow memory or compiler instrumentation).  Expand
those into CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS.
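For context, a minimal sketch of the pattern this commit applies (the CONFIG_* symbols and IS_ENABLED() are the real kernel ones; the helper names below are made up for illustration only):

/*
 * Illustrative sketch, not taken from the patch below. It shows the two
 * forms of the conversion: preprocessor checks and IS_ENABLED() checks.
 * The hardware tag-based mode (CONFIG_KASAN_HW_TAGS) uses neither shadow
 * memory nor compiler instrumentation, so it is deliberately excluded.
 */

/* Before: shadow-memory code guarded by any KASAN mode. */
#ifdef CONFIG_KASAN
void example_map_kasan_shadow(void);	/* hypothetical helper */
#endif

/* After: only the software modes, which actually use shadow memory. */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void example_map_kasan_shadow(void);	/* hypothetical helper */
#endif

/* The same expansion for runtime-evaluated config checks: */
static inline bool example_uses_kasan_shadow(void)
{
	return IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	       IS_ENABLED(CONFIG_KASAN_SW_TAGS);
}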

Link: https://lkml.kernel.org/r/e6971e432dbd72bb897ff14134ebb7e169bdcf0c.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Marco Elver <elver@google.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8a494023
@@ -334,7 +334,7 @@ config BROKEN_GAS_INST
 config KASAN_SHADOW_OFFSET
 	hex
-	depends on KASAN
+	depends on KASAN_GENERIC || KASAN_SW_TAGS
 	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
 	default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
 	default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
...
@@ -137,7 +137,7 @@ head-y := arch/arm64/kernel/head.o
 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
 KASAN_SHADOW_SCALE_SHIFT := 4
-else
+else ifeq ($(CONFIG_KASAN_GENERIC), y)
 KASAN_SHADOW_SCALE_SHIFT := 3
 endif
...
@@ -473,7 +473,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 #define NOKPROBE(x)
 #endif
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define EXPORT_SYMBOL_NOKASAN(name)
 #else
 #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
...
@@ -72,7 +72,7 @@
  * address space for the shadow region respectively. They can bloat the stack
  * significantly, so double the (minimum) stack size when they are in use.
  */
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 #define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
 					+ KASAN_SHADOW_OFFSET)
...
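A worked example of the KASAN_SHADOW_END formula above, under an assumed configuration (generic KASAN on a 48-bit-VA kernel, so KASAN_SHADOW_SCALE_SHIFT = 3 and CONFIG_KASAN_SHADOW_OFFSET = 0xdfff800000000000 from the Kconfig hunk earlier):

/*
 * KASAN_SHADOW_END = (1UL << (64 - 3)) + 0xdfff800000000000
 *                  = 0x2000000000000000 + 0xdfff800000000000
 *                  = 0xffff800000000000
 *
 * i.e. with these (assumed) values the shadow region ends at
 * 0xffff800000000000; other VA sizes and the SW_TAGS scale shift of 4
 * plug into the same formula.
 */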
@@ -5,7 +5,7 @@
 #ifndef __ASM_STRING_H
 #define __ASM_STRING_H
-#ifndef CONFIG_KASAN
+#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 #define __HAVE_ARCH_STRRCHR
 extern char *strrchr(const char *, int c);
@@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t);
 void memcpy_flushcache(void *dst, const void *src, size_t cnt);
 #endif
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+	!defined(__SANITIZE_ADDRESS__)
 /*
  * For files that are not instrumented (e.g. mm/slub.c) we
...
@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
...
@@ -37,7 +37,7 @@ __efistub_strncmp		= __pi_strncmp;
 __efistub_strrchr		= __pi_strrchr;
 __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy		= __pi_memcpy;
 __efistub___memmove		= __pi_memmove;
 __efistub___memset		= __pi_memset;
...
@@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/*
 		 * KASAN does not expect the module region to intersect the
 		 * vmalloc region, since shadow memory is allocated for each
...
@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/* don't exceed the static module region - see below */
 		module_alloc_end = MODULES_END;
@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size)
 			NUMA_NO_NODE, __builtin_return_address(0));
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    !IS_ENABLED(CONFIG_KASAN))
+	    !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/*
 		 * KASAN can only deal with module allocations being served
 		 * from the reserved module region, since the remainder of
...
@@ -29,7 +29,7 @@
 enum address_markers_idx {
 	PAGE_OFFSET_NR = 0,
 	PAGE_END_NR,
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	KASAN_START_NR,
 #endif
 };
@@ -37,7 +37,7 @@ enum address_markers_idx {
 static struct addr_marker address_markers[] = {
 	{ PAGE_OFFSET,			"Linear Mapping start" },
 	{ 0 /* PAGE_END */,		"Linear Mapping end" },
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	{ 0 /* KASAN_SHADOW_START */,	"Kasan shadow start" },
 	{ KASAN_SHADOW_END,		"Kasan shadow end" },
 #endif
@@ -383,7 +383,7 @@ void ptdump_check_wx(void)
 static int ptdump_init(void)
 {
 	address_markers[PAGE_END_NR].start_address = PAGE_END;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
 #endif
 	ptdump_initialize();
...
@@ -9,7 +9,7 @@
  * even in compilation units that selectively disable KASAN, but must use KASAN
  * to validate access to an address.  Never use these in header files!
  */
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 bool __kasan_check_read(const volatile void *p, unsigned int size);
 bool __kasan_check_write(const volatile void *p, unsigned int size);
 #else
...
@@ -238,7 +238,8 @@ static inline void kasan_release_vmalloc(unsigned long start,
 #endif /* CONFIG_KASAN_VMALLOC */
-#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+	!defined(CONFIG_KASAN_VMALLOC)
 /*
  * These functions provide a special case to support backing module
@@ -248,12 +249,12 @@ static inline void kasan_release_vmalloc(unsigned long start,
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
-#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
+#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
+#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 #ifdef CONFIG_KASAN_INLINE
 void kasan_non_canonical_hook(unsigned long addr);
...
@@ -96,7 +96,8 @@ void module_arch_cleanup(struct module *mod);
 /* Any cleanup before freeing mod->module_init */
 void module_arch_freeing_init(struct module *mod);
-#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+		!defined(CONFIG_KASAN_VMALLOC)
 #include <linux/kasan.h>
 #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
 #else
...
@@ -267,7 +267,7 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o
 #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
 extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
 extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
...
@@ -4,7 +4,7 @@
 #include <linux/ptdump.h>
 #include <linux/kasan.h>
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 /*
  * This is an optimization for KASAN=y case. Since all kasan page tables
  * eventually point to the kasan_early_shadow_page we could call note_page()
@@ -31,7 +31,8 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pgd_t val = READ_ONCE(*pgd);
-#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN)
+#if CONFIG_PGTABLE_LEVELS > 4 && \
+		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
 		return note_kasan_page_table(walk, addr);
 #endif
@@ -51,7 +52,8 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	p4d_t val = READ_ONCE(*p4d);
-#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN)
+#if CONFIG_PGTABLE_LEVELS > 3 && \
+		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
 		return note_kasan_page_table(walk, addr);
 #endif
@@ -71,7 +73,8 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pud_t val = READ_ONCE(*pud);
-#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN)
+#if CONFIG_PGTABLE_LEVELS > 2 && \
+		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
 		return note_kasan_page_table(walk, addr);
 #endif
@@ -91,7 +94,7 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pmd_t val = READ_ONCE(*pmd);
-#if defined(CONFIG_KASAN)
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
 		return note_kasan_page_table(walk, addr);
 #endif
...
@@ -148,10 +148,12 @@ endif
 # we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
 #
 ifeq ($(CONFIG_KASAN),y)
+ifneq ($(CONFIG_KASAN_HW_TAGS),y)
 _c_flags += $(if $(patsubst n%,, \
 		$(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
 		$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
 endif
+endif
 ifeq ($(CONFIG_UBSAN),y)
 _c_flags += $(if $(patsubst n%,, \
...