Commit 1bb5eab3 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, mm: integrate page_alloc init with HW_TAGS

This change uses the previously added memory initialization feature of
the HW_TAGS KASAN routines to initialize page_alloc memory when
init_on_alloc/free is enabled.

With this change, kernel_init_free_pages() is no longer called when both
HW_TAGS KASAN and init_on_alloc/free are enabled.  Instead, memory is
initialized by the KASAN runtime.
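
For illustration, the free path now reduces to roughly the following
(a simplified excerpt of the free_pages_prepare() hunk below; the
allocation path in post_alloc_hook() is symmetrical):

	bool init = want_init_on_free();

	/* Software initialization only when KASAN can't integrate it. */
	if (init && !kasan_has_integrated_init())
		kernel_init_free_pages(page, 1 << order);
	/* With HW_TAGS, init means: zero memory while setting the tags. */
	kasan_free_nondeferred_pages(page, order, init, fpi_flags);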

To avoid discrepancies in which memory gets initialized that could be
introduced by future changes, the KASAN and kernel_init_free_pages()
hooks are kept next to each other, and a warning comment is added.

This patch changes the order in which the memory initialization and
page poisoning hooks are called.  This has no side effects, as whenever
page poisoning is enabled, memory initialization gets disabled.
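
For reference, this mutual exclusion is established at boot; roughly
(a simplified sketch of the init_on_alloc/init_on_free setup in
mm/page_alloc.c, shown here for context and not part of this patch):

	/* Page poisoning takes precedence over memory auto-init. */
	if (_init_on_alloc_enabled_early) {
		if (page_poisoning_enabled())
			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
				"will take precedence over init_on_alloc\n");
		else
			static_branch_enable(&init_on_alloc);
	}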

Combining setting allocation tags with memory initialization improves
HW_TAGS KASAN performance when init_on_alloc/free is enabled.
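
The saving comes from the HW_TAGS backend being able to initialize
memory in the same pass that sets the memory tags; conceptually (a
sketch with hypothetical helper names, not the actual arm64 MTE API):

	static void hw_tags_unpoison(void *addr, size_t size, u8 tag, bool init)
	{
		if (init)
			/* hypothetical: tag-and-zero in one pass (MTE STZGM-style) */
			set_tags_and_zero(addr, size, tag);
		else
			/* hypothetical: set tags only (MTE STGM-style) */
			set_tags(addr, size, tag);
	}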

[andreyknvl@google.com: fix for "integrate page_alloc init with HW_TAGS"]
  Link: https://lkml.kernel.org/r/65b6028dea2e9a6e8e2cb779b5115c09457363fc.1617122211.git.andreyknvl@google.com

Link: https://lkml.kernel.org/r/e77f0d5b1b20658ef0b8288625c74c2b3690e725.1615296150.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Sergei Trofimovich <slyfox@gentoo.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent aa5c219c
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
@@ -96,6 +96,11 @@ static __always_inline bool kasan_enabled(void)
 	return static_branch_likely(&kasan_flag_enabled);
 }
 
+static inline bool kasan_has_integrated_init(void)
+{
+	return kasan_enabled();
+}
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_enabled(void)
@@ -103,6 +108,11 @@ static inline bool kasan_enabled(void)
 	return true;
 }
 
+static inline bool kasan_has_integrated_init(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 slab_flags_t __kasan_never_merge(void);
@@ -120,20 +130,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
 	__kasan_unpoison_range(addr, size);
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order);
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
 static __always_inline void kasan_alloc_pages(struct page *page,
-						unsigned int order)
+						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_alloc_pages(page, order);
+		__kasan_alloc_pages(page, order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order);
+void __kasan_free_pages(struct page *page, unsigned int order, bool init);
 static __always_inline void kasan_free_pages(struct page *page,
-						unsigned int order)
+						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_free_pages(page, order);
+		__kasan_free_pages(page, order, init);
 }
 
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -277,13 +287,17 @@ static inline bool kasan_enabled(void)
 {
 	return false;
 }
+static inline bool kasan_has_integrated_init(void)
+{
+	return false;
+}
 static inline slab_flags_t kasan_never_merge(void)
 {
 	return 0;
}
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
 	return 0;
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
 	unsigned long i;
@@ -108,14 +108,14 @@ void __kasan_alloc_pages(struct page *page, unsigned int order)
 	tag = kasan_random_tag();
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-	kasan_unpoison(page_address(page), PAGE_SIZE << order, false);
+	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_free_pages(struct page *page, unsigned int order, bool init)
 {
 	if (likely(!PageHighMem(page)))
 		kasan_poison(page_address(page), PAGE_SIZE << order,
-			     KASAN_FREE_PAGE, false);
+			     KASAN_FREE_PAGE, init);
 }
 
 /*
diff --git a/mm/mempool.c b/mm/mempool.c
@@ -106,7 +106,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_slab_free_mempool(element);
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_free_pages(element, (unsigned long)pool->pool_data);
+		kasan_free_pages(element, (unsigned long)pool->pool_data, false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +114,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_range(element, __ksize(element));
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
@@ -397,14 +397,14 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * initialization is done, but this is not likely to happen.
  */
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (static_branch_unlikely(&deferred_pages))
 		return;
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -456,12 +456,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 }
 #else
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1243,6 +1243,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, bool check_free, fpi_t fpi_flags)
 {
 	int bad = 0;
+	bool init;
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1300,16 +1301,21 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
-	if (want_init_on_free())
-		kernel_init_free_pages(page, 1 << order);
 
 	kernel_poison_pages(page, 1 << order);
 
 	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_free_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
-	kasan_free_nondeferred_pages(page, order, fpi_flags);
+	init = want_init_on_free();
+	if (init && !kasan_has_integrated_init())
+		kernel_init_free_pages(page, 1 << order);
+	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
 
 	/*
 	 * arch_free_page() can make the page's contents inaccessible. s390
@@ -2316,17 +2322,32 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
+	bool init;
+
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
 	debug_pagealloc_map_pages(page, 1 << order);
-	kasan_alloc_pages(page, order);
+
+	/*
+	 * Page unpoisoning must happen before memory initialization.
+	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
+	 * allocations and the page unpoisoning code will complain.
+	 */
 	kernel_unpoison_pages(page, 1 << order);
-	set_page_owner(page, order, gfp_flags);
 
-	if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_alloc_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 */
+	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+	kasan_alloc_pages(page, order, init);
+	if (init && !kasan_has_integrated_init())
 		kernel_init_free_pages(page, 1 << order);
+
+	set_page_owner(page, order, gfp_flags);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,