Commit 080eb83f authored by Andrey Konovalov, committed by Linus Torvalds

kasan: initialize shadow to 0xff for tag-based mode

A tag-based KASAN shadow memory cell contains a memory tag that
corresponds to the tag in the top byte of the pointer that points to
that memory.  The native top byte value of kernel pointers is 0xff, so
with tag-based KASAN we need to initialize shadow memory to 0xff.
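As an illustration of why 0xff rather than 0x00, here is a minimal
user-space sketch of the tag/shadow match this change relies on.  All
names below (TAG_SHIFT, GRANULE_SHIFT, NATIVE_TAG, shadow[], ptr_tag(),
access_ok()) are hypothetical stand-ins for the example, not kernel
API; real tag-based KASAN keeps one shadow byte per 16-byte granule.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical constants for the sketch, not kernel API. */
	#define TAG_SHIFT	56	/* tag lives in the pointer's top byte */
	#define GRANULE_SHIFT	4	/* one shadow byte per 16-byte granule */
	#define NATIVE_TAG	0xFFu	/* top byte of an untagged kernel pointer */

	static uint8_t shadow[64];	/* toy shadow covering a 1 KiB region */

	static uint8_t ptr_tag(uint64_t addr)
	{
		return addr >> TAG_SHIFT;
	}

	static int access_ok(uint64_t addr)
	{
		/* An access passes when the pointer tag matches the shadow tag. */
		return ptr_tag(addr) == shadow[(addr & 0x3FF) >> GRANULE_SHIFT];
	}

	int main(void)
	{
		uint64_t native_ptr = 0xFF00000000000040ull;

		/* Shadow zero-initialized: every dereference through a native
		 * 0xff-tagged pointer mismatches, i.e. a false positive. */
		memset(shadow, 0x00, sizeof(shadow));
		printf("zero-init shadow: %d\n", access_ok(native_ptr)); /* 0 */

		/* Shadow initialized to 0xff (KASAN_SHADOW_INIT for SW_TAGS):
		 * native pointers match by construction. */
		memset(shadow, NATIVE_TAG, sizeof(shadow));
		printf("0xff-init shadow: %d\n", access_ok(native_ptr)); /* 1 */
		return 0;
	}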

[cai@lca.pw: arm64: skip kmemleak for KASAN again]
  Link: http://lkml.kernel.org/r/20181226020550.63712-1-cai@lca.pw
Link: http://lkml.kernel.org/r/5cc1b789aad7c99cf4f3ec5b328b147ad53edb40.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9577dd74
arch/arm64/mm/kasan_init.c
@@ -43,6 +43,14 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 	return __pa(p);
 }
 
+static phys_addr_t __init kasan_alloc_raw_page(int node)
+{
+	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+					     __pa(MAX_DMA_ADDRESS),
+					     MEMBLOCK_ALLOC_KASAN, node);
+	return __pa(p);
+}
+
 static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
 				      bool early)
 {
@@ -92,7 +100,9 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 	do {
 		phys_addr_t page_phys = early ?
 				__pa_symbol(kasan_early_shadow_page)
-				: kasan_alloc_zeroed_page(node);
+				: kasan_alloc_raw_page(node);
+		if (!early)
+			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
 		next = addr + PAGE_SIZE;
 		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
 	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@@ -239,7 +249,7 @@ void __init kasan_init(void)
 			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
 				PAGE_KERNEL_RO));
 
-	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
 	/* At this point kasan is fully initialized. Enable error messages */
include/linux/kasan.h
@@ -153,6 +153,8 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
 #ifdef CONFIG_KASAN_GENERIC
 
+#define KASAN_SHADOW_INIT 0
+
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
 
@@ -163,4 +165,10 @@ static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 
 #endif /* CONFIG_KASAN_GENERIC */
 
+#ifdef CONFIG_KASAN_SW_TAGS
+
+#define KASAN_SHADOW_INIT 0xFF
+
+#endif /* CONFIG_KASAN_SW_TAGS */
+
 #endif /* LINUX_KASAN_H */
mm/kasan/common.c
@@ -473,11 +473,12 @@ int kasan_module_alloc(void *addr, size_t size)
 
 	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
 			shadow_start + shadow_size,
-			GFP_KERNEL | __GFP_ZERO,
+			GFP_KERNEL,
 			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
 			__builtin_return_address(0));
 
 	if (ret) {
+		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
 		find_vm_area(addr)->flags |= VM_KASAN;
 		kmemleak_ignore(ret);
 		return 0;