Commit 9b7ea46a authored by Qian Cai, committed by Linus Torvalds

mm/hotplug: fix offline undo_isolate_page_range()

Commit f1dd2cd1 ("mm, memory_hotplug: do not associate hotadded
memory to zones until online") introduced move_pfn_range_to_zone() which
calls memmap_init_zone() during onlining a memory block.
memmap_init_zone() will reset pagetype flags and makes migrate type to
be MOVABLE.

However, __offline_pages() also calls undo_isolate_page_range() after
offline_isolated_pages() to do the same thing.  Because commit
2ce13640 ("mm: __first_valid_page skip over offline pages") changed
__first_valid_page() to skip offline pages, undo_isolate_page_range()
here just wastes CPU cycles looping over the offlining PFN range while
doing nothing, because __first_valid_page() will return NULL, as
offline_isolated_pages() has already marked all memory sections within
the pfn range as offline via offline_mem_sections().

Also, after calling the "useless" undo_isolate_page_range() here, the
code reaches the point of no return by notifying MEM_OFFLINE.  Those
pages will be marked as MIGRATE_MOVABLE again once they are onlined.
The only thing left to do is to decrease the zone's counter of isolated
pageblocks, which would otherwise leave some page-allocation paths
slower — the slowdown that the above commit introduced.

Even if alloc_contig_range() can be used to isolate 16GB-hugetlb pages
on ppc64, an "int" should still be enough to represent the number of
pageblocks there.  Fix an incorrect comment along the way.

[cai@lca.pw: v4]
  Link: http://lkml.kernel.org/r/20190314150641.59358-1-cai@lca.pw
Link: http://lkml.kernel.org/r/20190313143133.46200-1-cai@lca.pw
Fixes: 2ce13640 ("mm: __first_valid_page skip over offline pages")
Signed-off-by: Qian Cai <cai@lca.pw>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>	[4.13+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 73601ea5
...@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page, ...@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
/* /*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
* If specified range includes migrate types other than MOVABLE or CMA,
* this will fail with -EBUSY.
*
* For isolating all pages in the range finally, the caller have to
* free all pages in the range. test_page_isolated() can be used for
* test it.
*
* The following flags are allowed (they can be combined in a bit mask)
* SKIP_HWPOISON - ignore hwpoison pages
* REPORT_FAILURE - report details about the failure to isolate the range
*/ */
int int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
......
...@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
{ {
unsigned long pfn, nr_pages; unsigned long pfn, nr_pages;
long offlined_pages; long offlined_pages;
int ret, node; int ret, node, nr_isolate_pageblock;
unsigned long flags; unsigned long flags;
unsigned long valid_start, valid_end; unsigned long valid_start, valid_end;
struct zone *zone; struct zone *zone;
...@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
ret = start_isolate_page_range(start_pfn, end_pfn, ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, MIGRATE_MOVABLE,
SKIP_HWPOISON | REPORT_FAILURE); SKIP_HWPOISON | REPORT_FAILURE);
if (ret) { if (ret < 0) {
reason = "failure to isolate range"; reason = "failure to isolate range";
goto failed_removal; goto failed_removal;
} }
nr_isolate_pageblock = ret;
arg.start_pfn = start_pfn; arg.start_pfn = start_pfn;
arg.nr_pages = nr_pages; arg.nr_pages = nr_pages;
...@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* Ok, all of our target is isolated. /* Ok, all of our target is isolated.
We cannot do rollback at this point. */ We cannot do rollback at this point. */
offline_isolated_pages(start_pfn, end_pfn); offline_isolated_pages(start_pfn, end_pfn);
/* reset pagetype flags and makes migrate type to be MOVABLE */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); /*
* Onlining will reset pagetype flags and makes migrate type
* MOVABLE, so just need to decrease the number of isolated
* pageblocks zone counter here.
*/
spin_lock_irqsave(&zone->lock, flags);
zone->nr_isolate_pageblock -= nr_isolate_pageblock;
spin_unlock_irqrestore(&zone->lock, flags);
/* removal success */ /* removal success */
adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
zone->present_pages -= offlined_pages; zone->present_pages -= offlined_pages;
......
...@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, ...@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
ret = start_isolate_page_range(pfn_max_align_down(start), ret = start_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype, 0); pfn_max_align_up(end), migratetype, 0);
if (ret) if (ret < 0)
return ret; return ret;
/* /*
......
...@@ -160,19 +160,25 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) ...@@ -160,19 +160,25 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
return NULL; return NULL;
} }
/* /**
* start_isolate_page_range() -- make page-allocation-type of range of pages * start_isolate_page_range() - make page-allocation-type of range of pages to
* to be MIGRATE_ISOLATE. * be MIGRATE_ISOLATE.
* @start_pfn: The lower PFN of the range to be isolated. * @start_pfn: The lower PFN of the range to be isolated.
* @end_pfn: The upper PFN of the range to be isolated. * @end_pfn: The upper PFN of the range to be isolated.
* @migratetype: migrate type to set in error recovery. * start_pfn/end_pfn must be aligned to pageblock_order.
* @migratetype: Migrate type to set in error recovery.
* @flags: The following flags are allowed (they can be combined in
* a bit mask)
* SKIP_HWPOISON - ignore hwpoison pages
* REPORT_FAILURE - report details about the failure to
* isolate the range
* *
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the * the range will never be allocated. Any free pages and pages freed in the
* future will not be allocated again. * future will not be allocated again. If specified range includes migrate types
* * other than MOVABLE or CMA, this will fail with -EBUSY. For isolating all
* start_pfn/end_pfn must be aligned to pageblock_order. * pages in the range finally, the caller have to free all pages in the range.
* Return 0 on success and -EBUSY if any part of range cannot be isolated. * test_page_isolated() can be used for test it.
* *
* There is no high level synchronization mechanism that prevents two threads * There is no high level synchronization mechanism that prevents two threads
* from trying to isolate overlapping ranges. If this happens, one thread * from trying to isolate overlapping ranges. If this happens, one thread
...@@ -181,6 +187,9 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) ...@@ -181,6 +187,9 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* returns an error. We then clean up by restoring the migration type on * returns an error. We then clean up by restoring the migration type on
* pageblocks we may have modified and return -EBUSY to caller. This * pageblocks we may have modified and return -EBUSY to caller. This
* prevents two threads from simultaneously working on overlapping ranges. * prevents two threads from simultaneously working on overlapping ranges.
*
* Return: the number of isolated pageblocks on success and -EBUSY if any part
* of range cannot be isolated.
*/ */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype, int flags) unsigned migratetype, int flags)
...@@ -188,6 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -188,6 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn; unsigned long pfn;
unsigned long undo_pfn; unsigned long undo_pfn;
struct page *page; struct page *page;
int nr_isolate_pageblock = 0;
BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
...@@ -196,13 +206,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -196,13 +206,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn < end_pfn; pfn < end_pfn;
pfn += pageblock_nr_pages) { pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages); page = __first_valid_page(pfn, pageblock_nr_pages);
if (page && if (page) {
set_migratetype_isolate(page, migratetype, flags)) { if (set_migratetype_isolate(page, migratetype, flags)) {
undo_pfn = pfn; undo_pfn = pfn;
goto undo; goto undo;
} }
nr_isolate_pageblock++;
} }
return 0; }
return nr_isolate_pageblock;
undo: undo:
for (pfn = start_pfn; for (pfn = start_pfn;
pfn < undo_pfn; pfn < undo_pfn;
......
...@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) ...@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
} }
#ifdef CONFIG_MEMORY_HOTREMOVE #ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as online */ /* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{ {
unsigned long pfn; unsigned long pfn;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment