Commit 1f600626 authored by Andrey Konovalov, committed by Linus Torvalds

kasan: rename KASAN_SHADOW_* to KASAN_GRANULE_*

This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

The new mode won't be using shadow memory, but will still use the concept
of memory granules.  Each memory granule maps to a single metadata entry:
8 bytes per one shadow byte for generic mode, 16 bytes per one shadow byte
for software tag-based mode, and 16 bytes per one allocation tag for
hardware tag-based mode.

Rename KASAN_SHADOW_SCALE_SIZE to KASAN_GRANULE_SIZE, and
KASAN_SHADOW_MASK to KASAN_GRANULE_MASK.

Also use MASK when used as a mask, otherwise use SIZE.

No functional changes.
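To illustrate the last two points, here is a minimal sketch of the naming
convention (the two #define lines are taken from the mm/kasan/kasan.h hunk
below; the variables around them are hypothetical, for illustration only):

        #define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
        #define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)

        /* Use SIZE when rounding a length or stepping granule by granule: */
        size_t aligned_size = round_up(size, KASAN_GRANULE_SIZE);

        /* Use MASK when extracting the byte offset within a granule: */
        unsigned long offset = addr & KASAN_GRANULE_MASK;

The kasan_module_alloc() hunk below applies the same rule: the old rounding
idiom (size + KASAN_SHADOW_MASK) becomes (size + KASAN_GRANULE_SIZE - 1),
because the constant acts as a size there, not as a mask.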

Link: https://lkml.kernel.org/r/939b5754e47f528a6e6a6f28ffc5815d8d128033.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cebd0eb2
@@ -265,7 +265,7 @@ Most mappings in vmalloc space are small, requiring less than a full
 page of shadow space. Allocating a full shadow page per mapping would
 therefore be wasteful. Furthermore, to ensure that different mappings
 use different shadow pages, mappings would have to be aligned to
-``KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE``.
+``KASAN_GRANULE_SIZE * PAGE_SIZE``.
 Instead, we share backing space across multiple mappings. We allocate
 a backing page when a mapping in vmalloc space uses a particular page
......
@@ -25,7 +25,7 @@
 #include "../mm/kasan/kasan.h"
 
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
 
 /*
  * We assign some test results to these globals to make sure the tests
......
@@ -15,7 +15,7 @@
 #include "../mm/kasan/kasan.h"
 
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
 
 static noinline void __init copy_user_test(void)
 {
......
@@ -106,7 +106,7 @@ void *memcpy(void *dest, const void *src, size_t len)
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
  */
 void poison_range(const void *address, size_t size, u8 value)
 {
@@ -138,13 +138,13 @@ void unpoison_range(const void *address, size_t size)
        poison_range(address, size, tag);
 
-       if (size & KASAN_SHADOW_MASK) {
+       if (size & KASAN_GRANULE_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
 
                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
-                       *shadow = size & KASAN_SHADOW_MASK;
+                       *shadow = size & KASAN_GRANULE_MASK;
        }
 }
@@ -301,7 +301,7 @@ void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
        poison_range(object,
-                       round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+                       round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_KMALLOC_REDZONE);
 }
@@ -373,7 +373,7 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
-                       shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+                       shadow_byte >= KASAN_GRANULE_SIZE;
 
        /* else CONFIG_KASAN_SW_TAGS: */
        if ((u8)shadow_byte == KASAN_TAG_INVALID)
@@ -412,7 +412,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                return true;
        }
 
-       rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
+       rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
        poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
@@ -445,9 +445,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                return NULL;
 
        redzone_start = round_up((unsigned long)(object + size),
-                               KASAN_SHADOW_SCALE_SIZE);
+                               KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
-                               KASAN_SHADOW_SCALE_SIZE);
+                               KASAN_GRANULE_SIZE);
 
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);
@@ -491,7 +491,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
-                               KASAN_SHADOW_SCALE_SIZE);
+                               KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);
 
        unpoison_range(ptr, size);
@@ -589,8 +589,8 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;
 
-       if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
-           WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
+       if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
+           WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;
 
        switch (action) {
@@ -748,7 +748,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
        if (!is_vmalloc_or_module_addr(start))
                return;
 
-       size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+       size = round_up(size, KASAN_GRANULE_SIZE);
        poison_range(start, size, KASAN_VMALLOC_INVALID);
 }
@@ -861,22 +861,22 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
        unsigned long region_start, region_end;
        unsigned long size;
 
-       region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-       region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+       region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
+       region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
 
        free_region_start = ALIGN(free_region_start,
-                                 PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+                                 PAGE_SIZE * KASAN_GRANULE_SIZE);
 
        if (start != region_start &&
            free_region_start < region_start)
-               region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+               region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
 
        free_region_end = ALIGN_DOWN(free_region_end,
-                                    PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+                                    PAGE_SIZE * KASAN_GRANULE_SIZE);
 
        if (end != region_end &&
            free_region_end > region_end)
-               region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+               region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
 
        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);
@@ -902,7 +902,8 @@ int kasan_module_alloc(void *addr, size_t size)
        unsigned long shadow_start;
 
        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-       scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+       scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
+                       KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);
 
        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
......
@@ -46,7 +46,7 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
 
        if (unlikely(shadow_value)) {
-               s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+               s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }
@@ -62,7 +62,7 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
         * Access crosses 8(shadow size)-byte boundary. Such access maps
         * into 2 shadow bytes, so we need to check them both.
         */
-       if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+       if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
        return memory_is_poisoned_1(addr + size - 1);
@@ -73,7 +73,7 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
        /* Unaligned 16-bytes access maps into 3 shadow bytes. */
-       if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+       if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
        return *shadow_addr;
@@ -134,7 +134,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
 
                if (unlikely(ret != (unsigned long)last_shadow ||
-                       ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+                       ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
@@ -200,7 +200,7 @@ void kasan_cache_shutdown(struct kmem_cache *cache)
 static void register_global(struct kasan_global *global)
 {
-       size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+       size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
 
        unpoison_range(global->beg, global->size);
@@ -274,10 +274,10 @@ EXPORT_SYMBOL(__asan_handle_no_return);
 /* Emitted by compiler to poison alloca()ed objects. */
 void __asan_alloca_poison(unsigned long addr, size_t size)
 {
-       size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+       size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
-       size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+       size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
......
@@ -34,7 +34,7 @@ void *find_first_bad_addr(void *addr, size_t size)
        void *p = addr;
 
        while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
-               p += KASAN_SHADOW_SCALE_SIZE;
+               p += KASAN_GRANULE_SIZE;
        return p;
 }
@@ -46,14 +46,14 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
        shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
 
        /*
-        * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+        * If shadow byte value is in [0, KASAN_GRANULE_SIZE) we can look
         * at the next shadow byte to determine the type of the bad access.
         */
-       if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+       if (*shadow_addr > 0 && *shadow_addr <= KASAN_GRANULE_SIZE - 1)
                shadow_addr++;
 
        switch (*shadow_addr) {
-       case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+       case 0 ... KASAN_GRANULE_SIZE - 1:
                /*
                 * In theory it's still possible to see these shadow values
                 * due to a data race in the kernel code.
......
@@ -442,8 +442,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
        end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
        if (WARN_ON((unsigned long)start %
-                       (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-           WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+                       (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+           WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
                return;
 
        for (; addr < end; addr = next) {
@@ -477,8 +477,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
        shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
        if (WARN_ON((unsigned long)start %
-                       (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-           WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+                       (KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+           WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
                return -EINVAL;
 
        ret = kasan_populate_early_shadow(shadow_start, shadow_end);
......
@@ -5,8 +5,8 @@
 #include <linux/kasan.h>
 #include <linux/stackdepot.h>
 
-#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
-#define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
+#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)
 
 #define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
 #define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
......
@@ -314,24 +314,24 @@ static bool __must_check get_address_stack_frame_info(const void *addr,
                return false;
 
        aligned_addr = round_down((unsigned long)addr, sizeof(long));
-       mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
+       mem_ptr = round_down(aligned_addr, KASAN_GRANULE_SIZE);
        shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
        shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
 
        while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
                shadow_ptr--;
-               mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+               mem_ptr -= KASAN_GRANULE_SIZE;
        }
 
        while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
                shadow_ptr--;
-               mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+               mem_ptr -= KASAN_GRANULE_SIZE;
        }
 
        if (shadow_ptr < shadow_bottom)
                return false;
 
-       frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
+       frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE);
        if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
                pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
                       frame[0]);
@@ -599,6 +599,6 @@ void kasan_non_canonical_hook(unsigned long addr)
        else
                bug_type = "maybe wild-memory-access";
        pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
-                orig_addr, orig_addr + KASAN_SHADOW_MASK);
+                orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
 }
 #endif
@@ -76,7 +76,7 @@ void *find_first_bad_addr(void *addr, size_t size)
        void *end = p + size;
 
        while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
-               p += KASAN_SHADOW_SCALE_SIZE;
+               p += KASAN_GRANULE_SIZE;
        return p;
 }
......