Commit 7ec7096b authored by Pasha Tatashin, committed by Andrew Morton

mm/page_ext: init page_ext early if there are no deferred struct pages

page_ext must be initialized after all struct pages are initialized.
Therefore, page_ext is initialized after page_alloc_init_late(), and can
optionally be initialized earlier via the early_page_ext kernel parameter,
which as a side effect also disables deferred struct page initialization.

Automatically initialize page_ext early when there are no deferred struct
pages, so that page_ext can be used during kernel boot, for example to
track page allocations early.
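
Condensed, the resulting boot-time decision looks roughly like the sketch
below. It is paraphrased from the init/main.c hunks in the diff further
down, not verbatim kernel code, and elides the surrounding initialization
calls:

	static void __init mm_init(void)
	{
		/* ... earlier init, ending with vmalloc_init() ... */

		/* vmap is ready; if struct pages were not deferred, set up page_ext now */
		if (!deferred_struct_pages)
			page_ext_init();

		/* ... */
	}

	static noinline void __init kernel_init_freeable(void)
	{
		/* ... */
		page_alloc_init_late();	/* completes any deferred struct page init */

		/* otherwise, wait until every struct page exists */
		if (deferred_struct_pages)
			page_ext_init();

		/* ... */
	}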

[pasha.tatashin@soleen.com: fix build with CONFIG_PAGE_EXTENSION=n]
  Link: https://lkml.kernel.org/r/20230118155251.2522985-1-pasha.tatashin@soleen.com
Link: https://lkml.kernel.org/r/20230117204617.1553748-1-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Charan Teja Kalla <quic_charante@quicinc.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Li Zhe <lizhe.67@bytedance.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 64517d6e
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -29,6 +29,8 @@ struct page_ext_operations {
 	bool need_shared_flags;
 };
 
+extern bool deferred_struct_pages;
+
 #ifdef CONFIG_PAGE_EXTENSION
 
 /*
--- a/init/main.c
+++ b/init/main.c
@@ -855,8 +855,8 @@ static void __init mm_init(void)
 	pgtable_init();
 	debug_objects_mem_init();
 	vmalloc_init();
-	/* Should be run after vmap initialization */
-	if (early_page_ext_enabled())
+	/* If no deferred init page_ext now, as vmap is fully initialized */
+	if (!deferred_struct_pages)
 		page_ext_init();
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
@@ -1628,7 +1628,7 @@ static noinline void __init kernel_init_freeable(void)
 	padata_init();
 	page_alloc_init_late();
 	/* Initialize page ext after all struct pages are initialized. */
-	if (!early_page_ext_enabled())
+	if (deferred_struct_pages)
 		page_ext_init();
 
 	do_basic_setup();
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -430,6 +430,8 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
+bool deferred_struct_pages __meminitdata;
+
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 /*
  * During boot we initialize deferred pages on-demand, as needed, but once
@@ -6803,8 +6805,10 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 		if (context == MEMINIT_EARLY) {
 			if (overlap_memmap_init(zone, &pfn))
 				continue;
-			if (defer_init(nid, pfn, zone_end_pfn))
+			if (defer_init(nid, pfn, zone_end_pfn)) {
+				deferred_struct_pages = true;
 				break;
+			}
 		}
 
 		page = pfn_to_page(pfn);
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -92,7 +92,7 @@ unsigned long page_ext_size;
 
 static unsigned long total_usage;
 static struct page_ext *lookup_page_ext(const struct page *page);
-bool early_page_ext;
+bool early_page_ext __meminitdata;
 static int __init setup_early_page_ext(char *str)
 {
 	early_page_ext = true;
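
For illustration only, a page_ext client that records per-page data for
allocations made during boot could look roughly like the sketch below. The
page_ext_operations layout and the page_ext_get()/page_ext_put() accessors
are the existing page_ext interface; the "alloc_track" names, the payload,
and the hook placement are hypothetical and not part of this commit:

	/* Hypothetical page_ext client sketch; all "alloc_track" names are illustrative. */
	#include <linux/jiffies.h>
	#include <linux/mm.h>
	#include <linux/page_ext.h>

	struct alloc_track {
		unsigned long alloc_jiffies;	/* hypothetical per-page payload */
	};

	static bool need_alloc_track(void)
	{
		/* Ask the page_ext core to reserve sizeof(struct alloc_track) per page. */
		return true;
	}

	/* A real client would be listed in the static page_ext_ops[] table in mm/page_ext.c. */
	struct page_ext_operations alloc_track_ops = {
		.size			= sizeof(struct alloc_track),
		.need			= need_alloc_track,
		.need_shared_flags	= false,
	};

	/* Called from an allocation hook; tolerates page_ext not being set up yet. */
	static void alloc_track_note(struct page *page)
	{
		struct page_ext *page_ext = page_ext_get(page);
		struct alloc_track *track;

		if (!page_ext)	/* page_ext not (yet) available for this page */
			return;

		track = (void *)page_ext + alloc_track_ops.offset;
		track->alloc_jiffies = jiffies;
		page_ext_put(page_ext);
	}

With this patch applied, such a hook starts seeing valid page_ext data from
mm_init() onwards on systems without deferred struct page initialization,
rather than only after page_alloc_init_late().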