Commit 90e796e2 authored by Baoquan He, committed by Andrew Morton

mm/mm_init.c: remove unneeded calc_memmap_size()

Nobody calls calc_memmap_size() now.

Link: https://lkml.kernel.org/r/20240325145646.1044760-6-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0ac5e785
@@ -1332,26 +1332,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
 }
 
-static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
-					     unsigned long present_pages)
-{
-	unsigned long pages = spanned_pages;
-
-	/*
-	 * Provide a more accurate estimation if there are holes within
-	 * the zone and SPARSEMEM is in use. If there are holes within the
-	 * zone, each populated memory region may cost us one or two extra
-	 * memmap pages due to alignment because memmap pages for each
-	 * populated regions may not be naturally aligned on page boundary.
-	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
-	 */
-	if (spanned_pages > present_pages + (present_pages >> 4) &&
-	    IS_ENABLED(CONFIG_SPARSEMEM))
-		pages = present_pages;
-
-	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
-}
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
...
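For reference, the heuristic being deleted can be reproduced in a few lines of user-space C. This is a minimal illustrative sketch, not kernel code: it assumes a 4 KiB page size and a 64-byte struct page (both vary by architecture and kernel configuration), and it replaces IS_ENABLED(CONFIG_SPARSEMEM) with a plain parameter.

#include <stdio.h>

/* Assumed values for illustration; real kernels may differ. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STRUCT_PAGE_SIZE 64UL	/* assumed sizeof(struct page) */

static unsigned long calc_memmap_size(unsigned long spanned_pages,
				       unsigned long present_pages,
				       int sparsemem)
{
	unsigned long pages = spanned_pages;

	/*
	 * If the zone span is noticeably larger than the present memory
	 * (more than ~6% holes) and SPARSEMEM is in use, base the estimate
	 * on present_pages instead of spanned_pages.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) && sparsemem)
		pages = present_pages;

	/* Bytes of struct page storage, rounded up to whole pages. */
	return PAGE_ALIGN(pages * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;
}

int main(void)
{
	/* A 1 GiB zone span of which only 512 MiB is actually present. */
	unsigned long spanned = 262144, present = 131072;

	printf("memmap pages: %lu\n", calc_memmap_size(spanned, present, 1));
	return 0;
}

With these inputs the SPARSEMEM branch fires (262144 > 131072 + 8192), so the estimate uses present_pages: 131072 * 64 bytes = 8 MiB, i.e. 2048 memmap pages for 512 MiB of present memory.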