Commit f2fc4b44 authored by Mike Rapoport (IBM), committed by Andrew Morton

mm: move init_mem_debugging_and_hardening() to mm/mm_init.c

init_mem_debugging_and_hardening() is only called from mm_core_init().

Move it close to the caller, make it static and rename it to
mem_debugging_and_hardening_init() for consistency with surrounding
convention.

Link: https://lkml.kernel.org/r/20230321170513.2401534-10-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4cd1e9ed
...@@ -3251,7 +3251,6 @@ extern int apply_to_existing_page_range(struct mm_struct *mm, ...@@ -3251,7 +3251,6 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size, unsigned long address, unsigned long size,
pte_fn_t fn, void *data); pte_fn_t fn, void *data);
extern void __init init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING #ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages); extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages); extern void __kernel_unpoison_pages(struct page *page, int numpages);
......
...@@ -204,6 +204,14 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); ...@@ -204,6 +204,14 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
extern char * const zone_names[MAX_NR_ZONES]; extern char * const zone_names[MAX_NR_ZONES];
/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
/* True when sanity checks on allocated/freed struct pages are active. */
static inline bool is_check_pages_enabled(void)
{
	const bool enabled = static_branch_unlikely(&check_pages_enabled);

	return enabled;
}
/* /*
* Structure for holding the mostly immutable allocation parameters passed * Structure for holding the mostly immutable allocation parameters passed
* between functions involved in allocations, including the alloc_pages* * between functions involved in allocations, including the alloc_pages*
......
...@@ -2531,6 +2531,95 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn, ...@@ -2531,6 +2531,95 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
__free_pages_core(page, order); __free_pages_core(page, order);
} }
/*
 * Boot-time request for zero-initializing pages on allocation; defaults to
 * the Kconfig setting, and may be overridden by the "init_on_alloc" early
 * param parsed below. Consumed by mem_debugging_and_hardening_init().
 */
static bool _init_on_alloc_enabled_early __read_mostly
= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
/* Parse "init_on_alloc=<bool>" from the kernel command line. */
static int __init early_init_on_alloc(char *buf)
{
return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);
/*
 * Boot-time request for zero-initializing pages on free; defaults to the
 * Kconfig setting, and may be overridden by the "init_on_free" early param
 * parsed below. Consumed by mem_debugging_and_hardening_init().
 */
static bool _init_on_free_enabled_early __read_mostly
= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
/* Parse "init_on_free=<bool>" from the kernel command line. */
static int __init early_init_on_free(char *buf)
{
return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);
DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
/*
* Enable static keys related to various memory debugging and hardening options.
* Some override others, and depend on early params that are evaluated in the
* order of appearance. So we need to first gather the full picture of what was
* enabled, and then make decisions.
*/
static void __init mem_debugging_and_hardening_init(void)
{
bool page_poisoning_requested = false;
/* set when any option below warrants enabling check_pages_enabled */
bool want_check_pages = false;
#ifdef CONFIG_PAGE_POISONING
/*
 * Page poisoning is debug page alloc for some arches. If
 * either of those options are enabled, enable poisoning.
 */
if (page_poisoning_enabled() ||
(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
debug_pagealloc_enabled())) {
static_branch_enable(&_page_poisoning_enabled);
page_poisoning_requested = true;
want_check_pages = true;
}
#endif
/*
 * Poisoning overwrites page contents with a pattern, so auto-init of
 * the same pages would be pointless; poisoning takes precedence and
 * the auto-init requests are dropped (with a notice).
 */
if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
page_poisoning_requested) {
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_alloc and init_on_free\n");
_init_on_alloc_enabled_early = false;
_init_on_free_enabled_early = false;
}
/*
 * Propagate the (possibly overridden) early-param values into the
 * static keys; explicitly disable covers the DEFAULT_ON Kconfig case.
 */
if (_init_on_alloc_enabled_early) {
want_check_pages = true;
static_branch_enable(&init_on_alloc);
} else {
static_branch_disable(&init_on_alloc);
}
if (_init_on_free_enabled_early) {
want_check_pages = true;
static_branch_enable(&init_on_free);
} else {
static_branch_disable(&init_on_free);
}
/* Auto-init pre-initializes memory, which defeats KMSAN's tracking. */
if (IS_ENABLED(CONFIG_KMSAN) &&
(_init_on_alloc_enabled_early || _init_on_free_enabled_early))
pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled()) {
want_check_pages = true;
static_branch_enable(&_debug_pagealloc_enabled);
/* guard pages only make sense with debug_pagealloc active */
if (debug_guardpage_minorder())
static_branch_enable(&_debug_guardpage_enabled);
}
#endif
/*
 * Any page debugging or hardening option also enables sanity checking
 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
 * enabled already.
 */
if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
static_branch_enable(&check_pages_enabled);
}
/* Report memory auto-initialization states for this boot. */ /* Report memory auto-initialization states for this boot. */
static void __init report_meminit(void) static void __init report_meminit(void)
{ {
...@@ -2570,7 +2659,7 @@ void __init mm_core_init(void) ...@@ -2570,7 +2659,7 @@ void __init mm_core_init(void)
* bigger than MAX_ORDER unless SPARSEMEM. * bigger than MAX_ORDER unless SPARSEMEM.
*/ */
page_ext_init_flatmem(); page_ext_init_flatmem();
init_mem_debugging_and_hardening(); mem_debugging_and_hardening_init();
kfence_alloc_pool(); kfence_alloc_pool();
report_meminit(); report_meminit();
kmsan_init_shadow(); kmsan_init_shadow();
......
...@@ -240,31 +240,6 @@ EXPORT_SYMBOL(init_on_alloc); ...@@ -240,31 +240,6 @@ EXPORT_SYMBOL(init_on_alloc);
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free); EXPORT_SYMBOL(init_on_free);
/* perform sanity checks on struct pages being allocated or freed */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
/* True when the check_pages_enabled static key is active. */
static inline bool is_check_pages_enabled(void)
{
return static_branch_unlikely(&check_pages_enabled);
}
/*
 * Boot-time request for zero-initializing pages on allocation; defaults
 * to Kconfig, overridable via the "init_on_alloc" early param below.
 */
static bool _init_on_alloc_enabled_early __read_mostly
= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
/* Parse "init_on_alloc=<bool>" from the kernel command line. */
static int __init early_init_on_alloc(char *buf)
{
return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);
/*
 * Boot-time request for zero-initializing pages on free; defaults to
 * Kconfig, overridable via the "init_on_free" early param below.
 */
static bool _init_on_free_enabled_early __read_mostly
= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
/* Parse "init_on_free=<bool>" from the kernel command line. */
static int __init early_init_on_free(char *buf)
{
return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);
/* /*
* A cached value of the page's pageblock's migratetype, used when the page is * A cached value of the page's pageblock's migratetype, used when the page is
* put on a pcplist. Used to avoid the pageblock migratetype lookup when * put on a pcplist. Used to avoid the pageblock migratetype lookup when
...@@ -798,76 +773,6 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, ...@@ -798,76 +773,6 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype) {} unsigned int order, int migratetype) {}
#endif #endif
/*
* Enable static keys related to various memory debugging and hardening options.
* Some override others, and depend on early params that are evaluated in the
* order of appearance. So we need to first gather the full picture of what was
* enabled, and then make decisions.
*/
/* Called once from mm init; evaluates early params gathered above. */
void __init init_mem_debugging_and_hardening(void)
{
bool page_poisoning_requested = false;
/* set when any option below warrants enabling check_pages_enabled */
bool want_check_pages = false;
#ifdef CONFIG_PAGE_POISONING
/*
 * Page poisoning is debug page alloc for some arches. If
 * either of those options are enabled, enable poisoning.
 */
if (page_poisoning_enabled() ||
(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
debug_pagealloc_enabled())) {
static_branch_enable(&_page_poisoning_enabled);
page_poisoning_requested = true;
want_check_pages = true;
}
#endif
/*
 * Poisoning overwrites page contents with a pattern, so auto-init of
 * the same pages would be pointless; poisoning takes precedence and
 * the auto-init requests are dropped (with a notice).
 */
if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
page_poisoning_requested) {
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_alloc and init_on_free\n");
_init_on_alloc_enabled_early = false;
_init_on_free_enabled_early = false;
}
/*
 * Propagate the (possibly overridden) early-param values into the
 * static keys; explicitly disable covers the DEFAULT_ON Kconfig case.
 */
if (_init_on_alloc_enabled_early) {
want_check_pages = true;
static_branch_enable(&init_on_alloc);
} else {
static_branch_disable(&init_on_alloc);
}
if (_init_on_free_enabled_early) {
want_check_pages = true;
static_branch_enable(&init_on_free);
} else {
static_branch_disable(&init_on_free);
}
/* Auto-init pre-initializes memory, which defeats KMSAN's tracking. */
if (IS_ENABLED(CONFIG_KMSAN) &&
(_init_on_alloc_enabled_early || _init_on_free_enabled_early))
pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled()) {
want_check_pages = true;
static_branch_enable(&_debug_pagealloc_enabled);
/* guard pages only make sense with debug_pagealloc active */
if (debug_guardpage_minorder())
static_branch_enable(&_debug_guardpage_enabled);
}
#endif
/*
 * Any page debugging or hardening option also enables sanity checking
 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
 * enabled already.
 */
if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
static_branch_enable(&check_pages_enabled);
}
static inline void set_buddy_order(struct page *page, unsigned int order) static inline void set_buddy_order(struct page *page, unsigned int order)
{ {
set_page_private(page, order); set_page_private(page, order);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment