Commit 8e57f8ac authored by Vlastimil Babka, committed by Linus Torvalds

mm, debug_pagealloc: don't rely on static keys too early

Commit 96a2b03f ("mm, debug_pagelloc: use static keys to enable
debugging") has introduced a static key to reduce overhead when
debug_pagealloc is compiled in but not enabled.  It relied on the
assumption that jump_label_init() is called before parse_early_param()
as in start_kernel(), so when the "debug_pagealloc=on" option is parsed,
it is safe to enable the static key.
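
For reference, the handler added by that commit (reproduced here in
simplified form from the diff below; the CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
variant of the key definition is omitted) flips the static key directly
from the early_param() callback, which is only safe once jump_label_init()
has set up the jump label machinery:

    DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

    static int __init early_debug_pagealloc(char *buf)
    {
            bool enable = false;

            if (kstrtobool(buf, &enable))
                    return -EINVAL;

            if (enable)
                    /* breaks if jump_label_init() has not run yet */
                    static_branch_enable(&_debug_pagealloc_enabled);

            return 0;
    }
    early_param("debug_pagealloc", early_debug_pagealloc);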

However, it turns out that multiple architectures call parse_early_param()
earlier, from their setup_arch().  x86 also calls jump_label_init() even
earlier, so no issue was found while testing the commit, but the same is
not true for e.g. ppc64 and s390, where the kernel would not boot with
debug_pagealloc=on, as found by our QA.
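
Schematically, the orderings involved are (simplified sketch; exact arch
details vary):

    start_kernel()
        setup_arch()          /* ppc64/s390 call parse_early_param() here,
                                 before jump_label_init() has run */
        ...
        jump_label_init()
        parse_early_param()   /* the ordering the static key relied on */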

To fix this without tricky changes to init code of multiple
architectures, this patch partially reverts the static key conversion
from 96a2b03f.  Init-time and non-fastpath calls (such as in arch
code) of debug_pagealloc_enabled() will again test a simple bool
variable.  Fastpath mm code is converted to a new
debug_pagealloc_enabled_static() variant that relies on the static key,
which is enabled in a well-defined point in mm_init() where it's
guaranteed that jump_label_init() has been called, regardless of
architecture.
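
In simplified form (identifiers as in the diff below, guard-page handling
omitted), the resulting two-stage scheme is:

    /* written directly by the early_param() handler, safe at any time */
    bool _debug_pagealloc_enabled_early __read_mostly
            = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);

    DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

    static int __init early_debug_pagealloc(char *buf)
    {
            /* only a plain bool is set here, no static key is touched */
            return kstrtobool(buf, &_debug_pagealloc_enabled_early);
    }
    early_param("debug_pagealloc", early_debug_pagealloc);

    void init_debug_pagealloc(void)
    {
            /* called from mm_init(), after jump_label_init() on every arch */
            if (!debug_pagealloc_enabled())
                    return;

            static_branch_enable(&_debug_pagealloc_enabled);
    }

Fast paths then test the key via debug_pagealloc_enabled_static(), while
init-time and slow-path callers keep using debug_pagealloc_enabled().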

[sfr@canb.auug.org.au: export _debug_pagealloc_enabled_early]
  Link: http://lkml.kernel.org/r/20200106164944.063ac07b@canb.auug.org.au
Link: http://lkml.kernel.org/r/20191219130612.23171-1-vbabka@suse.cz
Fixes: 96a2b03f ("mm, debug_pagelloc: use static keys to enable debugging")
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Qian Cai <cai@lca.pw>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a87e2a2
@@ -2658,13 +2658,25 @@ static inline bool want_init_on_free(void)
 		!page_poisoning_enabled();
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
 #else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
 #endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 
 static inline bool debug_pagealloc_enabled(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+		_debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
 {
 	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
 		return false;
...
@@ -553,6 +553,7 @@ static void __init mm_init(void)
 	 * bigger than MAX_ORDER unless SPARSEMEM.
 	 */
 	page_ext_init_flatmem();
+	init_debug_pagealloc();
 	report_meminit();
 	mem_init();
 	kmem_cache_init();
...
@@ -694,34 +694,27 @@ void prep_compound_page(struct page *page, unsigned int order)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 
 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 
 static int __init early_debug_pagealloc(char *buf)
 {
-	bool enable = false;
-
-	if (kstrtobool(buf, &enable))
-		return -EINVAL;
-
-	if (enable)
-		static_branch_enable(&_debug_pagealloc_enabled);
-
-	return 0;
+	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
 	if (!debug_pagealloc_enabled())
 		return;
 
+	static_branch_enable(&_debug_pagealloc_enabled);
+
 	if (!debug_guardpage_minorder())
 		return;
@@ -1186,7 +1179,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	 */
 	arch_free_page(page, order);
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		kernel_map_pages(page, 1 << order, 0);
 
 	kasan_free_nondeferred_pages(page, order);
@@ -1207,7 +1200,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		return free_pages_check(page);
 	else
 		return false;
@@ -1221,7 +1214,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
  */
 static bool free_pcp_prepare(struct page *page)
 {
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		return free_pages_prepare(page, 0, true);
 	else
 		return free_pages_prepare(page, 0, false);
@@ -1973,10 +1966,6 @@ void __init page_alloc_init_late(void)
 
 	for_each_populated_zone(zone)
 		set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	init_debug_guardpage();
-#endif
 }
 
 #ifdef CONFIG_CMA
@@ -2106,7 +2095,7 @@ static inline bool free_pages_prezeroed(void)
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		return check_new_page(page);
 	else
 		return false;
@@ -2128,7 +2117,7 @@ static inline bool check_pcp_refill(struct page *page)
 }
 static inline bool check_new_pcp(struct page *page)
 {
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		return check_new_page(page);
 	else
 		return false;
@@ -2155,7 +2144,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		kernel_map_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
 	kernel_poison_pages(page, 1 << order, 1);
...
@@ -1416,7 +1416,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
 		(cachep->size % PAGE_SIZE) == 0)
 		return true;
@@ -2008,7 +2008,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 	 * to check size >= 256. It guarantees that all necessary small
 	 * sized slab is initialized in current slab initialization sequence.
 	 */
-	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
 		size >= 256 && cachep->object_size > cache_line_size()) {
 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
...
@@ -288,7 +288,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 	unsigned long freepointer_addr;
 	void *p;
 
-	if (!debug_pagealloc_enabled())
+	if (!debug_pagealloc_enabled_static())
 		return get_freepointer(s, object);
 
 	freepointer_addr = (unsigned long)object + s->offset;
...
@@ -1383,7 +1383,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
 	unmap_vmap_area(va);
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(va->va_start, va->va_end);
 
 	free_vmap_area_noflush(va);
@@ -1681,7 +1681,7 @@ static void vb_free(const void *addr, unsigned long size)
 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range((unsigned long)addr,
 					(unsigned long)addr + size);
...