Commit 54608c3f authored by Mel Gorman, committed by Linus Torvalds

mm: meminit: minimise number of pfn->page lookups during initialisation

Deferred struct page initialisation is using pfn_to_page() on every PFN
unnecessarily.  This patch minimises the number of lookups and scheduler
checks.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7e18adb4
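
The core observation behind the patch is that the struct pages backing a MAX_ORDER-aligned, hole-free block of PFNs sit contiguously in the memmap, so one pfn_to_page() lookup per block suffices and the remaining pages can be reached by incrementing the pointer. Below is a minimal userspace sketch of that traversal pattern; the struct page, memmap array, pfn_to_page() and constant values are simplified stand-ins for the kernel's, not the kernel code itself.

#include <stdio.h>

/* Stand-ins for kernel constructs, simplified for illustration. */
#define MAX_ORDER_NR_PAGES 1024           /* 1 << (MAX_ORDER - 1) in the kernel */
#define NR_PFNS            (4 * MAX_ORDER_NR_PAGES)

struct page { unsigned long flags; };

static struct page memmap[NR_PFNS];       /* flat memmap: page for pfn is &memmap[pfn] */
static unsigned long lookups;             /* counts the "expensive" lookups */

static struct page *pfn_to_page(unsigned long pfn)
{
	lookups++;
	return &memmap[pfn];
}

int main(void)
{
	struct page *page = NULL;
	unsigned long pfn;

	for (pfn = 0; pfn < NR_PFNS; pfn++) {
		/*
		 * Within a MAX_ORDER-aligned block the struct pages are
		 * contiguous, so advancing the pointer is equivalent to a
		 * fresh lookup; only block boundaries pay the full cost.
		 */
		if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
			page++;
		else
			page = pfn_to_page(pfn);

		page->flags = 0;          /* stand-in for __init_single_page() */
	}

	printf("%d pfns initialised with %lu lookups\n", NR_PFNS, lookups);
	return 0;
}

Run, this reports 4096 pfns initialised with 4 lookups: one pfn_to_page() per 1024-page block rather than one per PFN, which is also where the patch moves the cond_resched() scheduler check.
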
mm/page_alloc.c
@@ -1091,6 +1091,7 @@ void __defermem_init deferred_init_memmap(int nid)
 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
 		unsigned long pfn, end_pfn;
+		struct page *page = NULL;
 
 		end_pfn = min(walk_end, zone_end_pfn(zone));
 		pfn = first_init_pfn;
@@ -1100,13 +1101,32 @@ void __defermem_init deferred_init_memmap(int nid)
 			pfn = zone->zone_start_pfn;
 
 		for (; pfn < end_pfn; pfn++) {
-			struct page *page;
-
-			if (!pfn_valid(pfn))
+			if (!pfn_valid_within(pfn))
 				continue;
 
-			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state))
+			/*
+			 * Ensure pfn_valid is checked every
+			 * MAX_ORDER_NR_PAGES for memory holes
+			 */
+			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+				if (!pfn_valid(pfn)) {
+					page = NULL;
+					continue;
+				}
+			}
+
+			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+				page = NULL;
 				continue;
+			}
+
+			/* Minimise pfn page lookups and scheduler checks */
+			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+				page++;
+			} else {
+				page = pfn_to_page(pfn);
+				cond_resched();
+			}
 
-			page = pfn_to_page(pfn);
 			if (page->flags) {
 				VM_BUG_ON(page_zone(page) != zone);
@@ -1116,7 +1136,6 @@ void __defermem_init deferred_init_memmap(int nid)
 			__init_single_page(page, pfn, zid, nid);
 			__free_pages_boot_core(page, pfn, 0);
 			nr_pages++;
-			cond_resched();
 		}
 		first_init_pfn = max(end_pfn, first_init_pfn);
 	}
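
For context on why the per-PFN validity test becomes cheap: unless an architecture can have memory holes inside a MAX_ORDER_NR_PAGES block (CONFIG_HOLES_IN_ZONE), pfn_valid_within() compiles away entirely, which is what lets the loop defer the real pfn_valid() test to block boundaries. Its definition in include/linux/mmzone.h of this era amounts to the following (comments added here):

#ifdef CONFIG_HOLES_IN_ZONE
/* Holes can occur within a MAX_ORDER block: do the full check per pfn */
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
/* No intra-block holes possible: the per-pfn check is free */
#define pfn_valid_within(pfn) (1)
#endif

Resetting page to NULL on the skip paths forces a fresh pfn_to_page() after any hole or foreign-node range, so the incremented pointer never walks across a gap in the memmap.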