Commit 35979ef3 authored by David Rientjes's avatar David Rientjes Committed by Linus Torvalds

mm, compaction: add per-zone migration pfn cache for async compaction

Each zone has a cached migration scanner pfn for memory compaction so that
subsequent calls to memory compaction can start where the previous call
left off.

Currently, the compaction migration scanner only updates the per-zone
cached pfn when pageblocks were not skipped for async compaction.  This
creates a dependency on calling sync compaction to prevent subsequent
calls to async compaction from scanning an enormous number of non-MOVABLE
pageblocks each time it is called.  On large machines, this could be
potentially very expensive.

This patch adds a per-zone cached migration scanner pfn only for async
compaction.  It is updated every time a pageblock has been scanned in its
entirety and when no pages from it were successfully isolated.  The cached
migration scanner pfn for sync compaction is updated only when called for
sync compaction.
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d53aea3d
...@@ -360,9 +360,10 @@ struct zone { ...@@ -360,9 +360,10 @@ struct zone {
/* Set to true when the PG_migrate_skip bits should be cleared */ /* Set to true when the PG_migrate_skip bits should be cleared */
bool compact_blockskip_flush; bool compact_blockskip_flush;
/* pfns where compaction scanners should start */ /* pfn where compaction free scanner should start */
unsigned long compact_cached_free_pfn; unsigned long compact_cached_free_pfn;
unsigned long compact_cached_migrate_pfn; /* pfn where async and sync compaction migration scanner should start */
unsigned long compact_cached_migrate_pfn[2];
#endif #endif
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */ /* see spanned/present_pages for more description */
......
...@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone) ...@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
unsigned long end_pfn = zone_end_pfn(zone); unsigned long end_pfn = zone_end_pfn(zone);
unsigned long pfn; unsigned long pfn;
zone->compact_cached_migrate_pfn = start_pfn; zone->compact_cached_migrate_pfn[0] = start_pfn;
zone->compact_cached_migrate_pfn[1] = start_pfn;
zone->compact_cached_free_pfn = end_pfn; zone->compact_cached_free_pfn = end_pfn;
zone->compact_blockskip_flush = false; zone->compact_blockskip_flush = false;
...@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat) ...@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
*/ */
static void update_pageblock_skip(struct compact_control *cc, static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated, struct page *page, unsigned long nr_isolated,
bool migrate_scanner) bool set_unsuitable, bool migrate_scanner)
{ {
struct zone *zone = cc->zone; struct zone *zone = cc->zone;
unsigned long pfn;
if (cc->ignore_skip_hint) if (cc->ignore_skip_hint)
return; return;
...@@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc, ...@@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc,
if (!page) if (!page)
return; return;
if (!nr_isolated) { if (nr_isolated)
unsigned long pfn = page_to_pfn(page); return;
/*
* Only skip pageblocks when all forms of compaction will be known to
* fail in the near future.
*/
if (set_unsuitable)
set_pageblock_skip(page); set_pageblock_skip(page);
/* Update where compaction should restart */ pfn = page_to_pfn(page);
if (migrate_scanner) {
if (!cc->finished_update_migrate && /* Update where async and sync compaction should restart */
pfn > zone->compact_cached_migrate_pfn) if (migrate_scanner) {
zone->compact_cached_migrate_pfn = pfn; if (cc->finished_update_migrate)
} else { return;
if (!cc->finished_update_free && if (pfn > zone->compact_cached_migrate_pfn[0])
pfn < zone->compact_cached_free_pfn) zone->compact_cached_migrate_pfn[0] = pfn;
zone->compact_cached_free_pfn = pfn; if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
} zone->compact_cached_migrate_pfn[1] = pfn;
} else {
if (cc->finished_update_free)
return;
if (pfn < zone->compact_cached_free_pfn)
zone->compact_cached_free_pfn = pfn;
} }
} }
#else #else
...@@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc, ...@@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
static void update_pageblock_skip(struct compact_control *cc, static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated, struct page *page, unsigned long nr_isolated,
bool migrate_scanner) bool set_unsuitable, bool migrate_scanner)
{ {
} }
#endif /* CONFIG_COMPACTION */ #endif /* CONFIG_COMPACTION */
...@@ -323,7 +336,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -323,7 +336,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* Update the pageblock-skip if the whole pageblock was scanned */ /* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn) if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false); update_pageblock_skip(cc, valid_page, total_isolated, true,
false);
count_compact_events(COMPACTFREE_SCANNED, nr_scanned); count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated) if (total_isolated)
...@@ -458,7 +472,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, ...@@ -458,7 +472,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long flags; unsigned long flags;
bool locked = false; bool locked = false;
struct page *page = NULL, *valid_page = NULL; struct page *page = NULL, *valid_page = NULL;
bool skipped_async_unsuitable = false; bool set_unsuitable = true;
const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) | const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
(unevictable ? ISOLATE_UNEVICTABLE : 0); (unevictable ? ISOLATE_UNEVICTABLE : 0);
...@@ -535,8 +549,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, ...@@ -535,8 +549,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
*/ */
mt = get_pageblock_migratetype(page); mt = get_pageblock_migratetype(page);
if (!cc->sync && !migrate_async_suitable(mt)) { if (!cc->sync && !migrate_async_suitable(mt)) {
cc->finished_update_migrate = true; set_unsuitable = false;
skipped_async_unsuitable = true;
goto next_pageblock; goto next_pageblock;
} }
} }
...@@ -640,11 +653,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, ...@@ -640,11 +653,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
/* /*
* Update the pageblock-skip information and cached scanner pfn, * Update the pageblock-skip information and cached scanner pfn,
* if the whole pageblock was scanned without isolating any page. * if the whole pageblock was scanned without isolating any page.
* This is not done when pageblock was skipped due to being unsuitable
* for async compaction, so that eventual sync compaction can try.
*/ */
if (low_pfn == end_pfn && !skipped_async_unsuitable) if (low_pfn == end_pfn)
update_pageblock_skip(cc, valid_page, nr_isolated, true); update_pageblock_skip(cc, valid_page, nr_isolated,
set_unsuitable, true);
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
...@@ -868,7 +880,8 @@ static int compact_finished(struct zone *zone, ...@@ -868,7 +880,8 @@ static int compact_finished(struct zone *zone,
/* Compaction run completes if the migrate and free scanner meet */ /* Compaction run completes if the migrate and free scanner meet */
if (cc->free_pfn <= cc->migrate_pfn) { if (cc->free_pfn <= cc->migrate_pfn) {
/* Let the next compaction start anew. */ /* Let the next compaction start anew. */
zone->compact_cached_migrate_pfn = zone->zone_start_pfn; zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
zone->compact_cached_free_pfn = zone_end_pfn(zone); zone->compact_cached_free_pfn = zone_end_pfn(zone);
/* /*
...@@ -993,7 +1006,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) ...@@ -993,7 +1006,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
* information on where the scanners should start but check that it * information on where the scanners should start but check that it
* is initialised by ensuring the values are within zone boundaries. * is initialised by ensuring the values are within zone boundaries.
*/ */
cc->migrate_pfn = zone->compact_cached_migrate_pfn; cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
cc->free_pfn = zone->compact_cached_free_pfn; cc->free_pfn = zone->compact_cached_free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
...@@ -1001,7 +1014,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) ...@@ -1001,7 +1014,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
} }
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
cc->migrate_pfn = start_pfn; cc->migrate_pfn = start_pfn;
zone->compact_cached_migrate_pfn = cc->migrate_pfn; zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
} }
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn); trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment