Commit a4de83dd authored by Mel Gorman, committed by Linus Torvalds

mm: meminit: free pages in large chunks where possible

Parallel struct page initialisation frees pages one at a time. Try to
free pages as single large pages where possible.
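The batching rule is small enough to show in isolation. Below is a minimal
userspace sketch of it, assuming MAX_ORDER is 11 as on common configurations;
free_one_block() and deferred_free_range_sketch() are illustrative stand-ins,
not kernel APIs:

	#include <stdio.h>

	#define MAX_ORDER		11
	#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))

	/* Stand-in for the buddy free; just records what would be freed */
	static void free_one_block(unsigned long pfn, int order)
	{
		printf("free pfn %lu at order %d\n", pfn, order);
	}

	/* Mirrors the decision in the new deferred_free_range() below */
	static void deferred_free_range_sketch(unsigned long pfn, int nr_pages)
	{
		int i;

		/* A naturally-aligned max-order run frees in one call */
		if (nr_pages == MAX_ORDER_NR_PAGES &&
		    (pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			free_one_block(pfn, MAX_ORDER - 1);
			return;
		}

		/* Anything else falls back to order-0 frees */
		for (i = 0; i < nr_pages; i++)
			free_one_block(pfn + i, 0);
	}

	int main(void)
	{
		deferred_free_range_sketch(2048, MAX_ORDER_NR_PAGES); /* 1 call  */
		deferred_free_range_sketch(2048, 3);                  /* 3 calls */
		return 0;
	}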
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3b242c66
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1062,6 +1062,25 @@ void __defer_init __free_pages_bootmem(struct page *page, unsigned long pfn,
 }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+static void __defermem_init deferred_free_range(struct page *page,
+					unsigned long pfn, int nr_pages)
+{
+	int i;
+
+	if (!page)
+		return;
+
+	/* Free a large naturally-aligned chunk if possible */
+	if (nr_pages == MAX_ORDER_NR_PAGES &&
+	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
+		return;
+	}
+
+	for (i = 0; i < nr_pages; i++, page++, pfn++)
+		__free_pages_boot_core(page, pfn, 0);
+}
+
 /* Initialise remaining memory on a node */
 void __defermem_init deferred_init_memmap(int nid)
 {
@@ -1092,6 +1111,9 @@ void __defermem_init deferred_init_memmap(int nid)
 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
 		unsigned long pfn, end_pfn;
 		struct page *page = NULL;
+		struct page *free_base_page = NULL;
+		unsigned long free_base_pfn = 0;
+		int nr_to_free = 0;
 
 		end_pfn = min(walk_end, zone_end_pfn(zone));
 		pfn = first_init_pfn;
@@ -1102,7 +1124,7 @@ void __defermem_init deferred_init_memmap(int nid)
 
 		for (; pfn < end_pfn; pfn++) {
 			if (!pfn_valid_within(pfn))
-				continue;
+				goto free_range;
 
 			/*
 			 * Ensure pfn_valid is checked every
@@ -1111,32 +1133,53 @@ void __defermem_init deferred_init_memmap(int nid)
 			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
 				if (!pfn_valid(pfn)) {
 					page = NULL;
-					continue;
+					goto free_range;
 				}
 			}
 
 			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
 				page = NULL;
-				continue;
+				goto free_range;
 			}
 
 			/* Minimise pfn page lookups and scheduler checks */
 			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
 				page++;
 			} else {
+				nr_pages += nr_to_free;
+				deferred_free_range(free_base_page,
+						free_base_pfn, nr_to_free);
+				free_base_page = NULL;
+				free_base_pfn = nr_to_free = 0;
+
 				page = pfn_to_page(pfn);
 				cond_resched();
 			}
 
 			if (page->flags) {
 				VM_BUG_ON(page_zone(page) != zone);
-				continue;
+				goto free_range;
 			}
 
 			__init_single_page(page, pfn, zid, nid);
-			__free_pages_boot_core(page, pfn, 0);
-			nr_pages++;
+			if (!free_base_page) {
+				free_base_page = page;
+				free_base_pfn = pfn;
+				nr_to_free = 0;
+			}
+			nr_to_free++;
+
+			/* Where possible, batch up pages for a single free */
+			continue;
+free_range:
+			/* Free the current block of pages to allocator */
+			nr_pages += nr_to_free;
+			deferred_free_range(free_base_page, free_base_pfn,
+						nr_to_free);
+			free_base_page = NULL;
+			free_base_pfn = nr_to_free = 0;
 		}
 
 		first_init_pfn = max(end_pfn, first_init_pfn);
 	}
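For reference, the loop above builds the runs incrementally: an invalid pfn,
a foreign-node pfn, or an already-initialised page jumps to free_range, which
flushes whatever has been batched, while contiguous valid pfns simply extend
the current run. A hedged userspace sketch of that accumulate-and-flush
pattern follows; walk_pfns(), flush_range(), and sample_pfn_ok() are
hypothetical names, and this sketch also flushes the run left over when its
own walk ends:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in flush: hand the batched run to the allocator */
	static void flush_range(unsigned long base_pfn, int nr_to_free)
	{
		if (nr_to_free)
			printf("free %d pages from pfn %lu\n",
			       nr_to_free, base_pfn);
	}

	/* Accumulate contiguous usable pfns; flush the run at every break */
	static void walk_pfns(unsigned long start, unsigned long end,
			      bool (*pfn_ok)(unsigned long))
	{
		unsigned long base_pfn = 0, pfn;
		int nr_to_free = 0;

		for (pfn = start; pfn < end; pfn++) {
			if (!pfn_ok(pfn)) {
				/* Break in the run: free what was batched */
				flush_range(base_pfn, nr_to_free);
				nr_to_free = 0;
				continue;
			}
			if (!nr_to_free)
				base_pfn = pfn;	/* start of a new run */
			nr_to_free++;
		}
		flush_range(base_pfn, nr_to_free);	/* flush the final run */
	}

	static bool sample_pfn_ok(unsigned long pfn)
	{
		return pfn != 5;	/* pretend pfn 5 is a hole */
	}

	int main(void)
	{
		walk_pfns(0, 10, sample_pfn_ok); /* frees [0,5) then [6,10) */
		return 0;
	}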