Commit d381c547 authored by Michal Hocko's avatar Michal Hocko Committed by Linus Torvalds

mm: only report isolation failures when offlining memory

Heiko has complained that his log is swamped by warnings from
has_unmovable_pages

[   20.536664] page dumped because: has_unmovable_pages
[   20.536792] page:000003d081ff4080 count:1 mapcount:0 mapping:000000008ff88600 index:0x0 compound_mapcount: 0
[   20.536794] flags: 0x3fffe0000010200(slab|head)
[   20.536795] raw: 03fffe0000010200 0000000000000100 0000000000000200 000000008ff88600
[   20.536796] raw: 0000000000000000 0020004100000000 ffffffff00000001 0000000000000000
[   20.536797] page dumped because: has_unmovable_pages
[   20.536814] page:000003d0823b0000 count:1 mapcount:0 mapping:0000000000000000 index:0x0
[   20.536815] flags: 0x7fffe0000000000()
[   20.536817] raw: 07fffe0000000000 0000000000000100 0000000000000200 0000000000000000
[   20.536818] raw: 0000000000000000 0000000000000000 ffffffff00000001 0000000000000000

which are not triggered by the memory hotplug but rather CMA allocator.
The original idea behind dumping the page state for all call paths was
that these messages will be helpful debugging failures.  From the above it
seems that this is not the case for the CMA path because we are lacking
much more context.  E.g. the second reported page might be a CMA-allocated
page.  It is still interesting to see a slab page in the CMA area but it
is hard to tell whether this is a bug from the above output alone.

Address this issue by dumping the page state only on request.  Both
start_isolate_page_range and has_unmovable_pages already have an argument
to ignore hwpoison pages so make this argument more generic and turn it
into flags and allow callers to combine non-default modes into a mask.
While we are at it, has_unmovable_pages call from
is_pageblock_removable_nolock (sysfs removable file) is questionable to
report the failure so drop it from there as well.

Link: http://lkml.kernel.org/r/20181218092802.31429-1-mhocko@kernel.org
Signed-off-by: default avatarMichal Hocko <mhocko@suse.com>
Reported-by: default avatarHeiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: default avatarOscar Salvador <osalvador@suse.de>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 2932c8b0
...@@ -30,8 +30,11 @@ static inline bool is_migrate_isolate(int migratetype) ...@@ -30,8 +30,11 @@ static inline bool is_migrate_isolate(int migratetype)
} }
#endif #endif
#define SKIP_HWPOISON 0x1
#define REPORT_FAILURE 0x2
bool has_unmovable_pages(struct zone *zone, struct page *page, int count, bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
int migratetype, bool skip_hwpoisoned_pages); int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype); void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page, int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable); int migratetype, int *num_movable);
...@@ -44,10 +47,14 @@ int move_freepages_block(struct zone *zone, struct page *page, ...@@ -44,10 +47,14 @@ int move_freepages_block(struct zone *zone, struct page *page,
* For isolating all pages in the range finally, the caller have to * For isolating all pages in the range finally, the caller have to
* free all pages in the range. test_page_isolated() can be used for * free all pages in the range. test_page_isolated() can be used for
* test it. * test it.
*
* The following flags are allowed (they can be combined in a bit mask)
* SKIP_HWPOISON - ignore hwpoison pages
* REPORT_FAILURE - report details about the failure to isolate the range
*/ */
int int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype, bool skip_hwpoisoned_pages); unsigned migratetype, int flags);
/* /*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
......
...@@ -1226,7 +1226,7 @@ static bool is_pageblock_removable_nolock(struct page *page) ...@@ -1226,7 +1226,7 @@ static bool is_pageblock_removable_nolock(struct page *page)
if (!zone_spans_pfn(zone, pfn)) if (!zone_spans_pfn(zone, pfn))
return false; return false;
return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true); return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
} }
/* Checks if this range of memory is likely to be hot-removable. */ /* Checks if this range of memory is likely to be hot-removable. */
...@@ -1577,7 +1577,8 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1577,7 +1577,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* set above range as isolated */ /* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn, ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, true); MIGRATE_MOVABLE,
SKIP_HWPOISON | REPORT_FAILURE);
if (ret) { if (ret) {
mem_hotplug_done(); mem_hotplug_done();
reason = "failure to isolate range"; reason = "failure to isolate range";
......
...@@ -7767,8 +7767,7 @@ void *__init alloc_large_system_hash(const char *tablename, ...@@ -7767,8 +7767,7 @@ void *__init alloc_large_system_hash(const char *tablename,
* race condition. So you can't expect this function should be exact. * race condition. So you can't expect this function should be exact.
*/ */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count, bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
int migratetype, int migratetype, int flags)
bool skip_hwpoisoned_pages)
{ {
unsigned long pfn, iter, found; unsigned long pfn, iter, found;
...@@ -7842,7 +7841,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, ...@@ -7842,7 +7841,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* The HWPoisoned page may be not in buddy system, and * The HWPoisoned page may be not in buddy system, and
* page_count() is not 0. * page_count() is not 0.
*/ */
if (skip_hwpoisoned_pages && PageHWPoison(page)) if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
continue; continue;
if (__PageMovable(page)) if (__PageMovable(page))
...@@ -7869,7 +7868,8 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, ...@@ -7869,7 +7868,8 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
return false; return false;
unmovable: unmovable:
WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
dump_page(pfn_to_page(pfn+iter), "unmovable page"); if (flags & REPORT_FAILURE)
dump_page(pfn_to_page(pfn+iter), "unmovable page");
return true; return true;
} }
...@@ -7996,8 +7996,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, ...@@ -7996,8 +7996,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
*/ */
ret = start_isolate_page_range(pfn_max_align_down(start), ret = start_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype, pfn_max_align_up(end), migratetype, 0);
false);
if (ret) if (ret)
return ret; return ret;
......
...@@ -15,8 +15,7 @@ ...@@ -15,8 +15,7 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h> #include <trace/events/page_isolation.h>
static int set_migratetype_isolate(struct page *page, int migratetype, static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
bool skip_hwpoisoned_pages)
{ {
struct zone *zone; struct zone *zone;
unsigned long flags, pfn; unsigned long flags, pfn;
...@@ -60,8 +59,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, ...@@ -60,8 +59,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype,
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages. * We just check MOVABLE pages.
*/ */
if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
skip_hwpoisoned_pages))
ret = 0; ret = 0;
/* /*
...@@ -185,7 +183,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) ...@@ -185,7 +183,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* prevents two threads from simultaneously working on overlapping ranges. * prevents two threads from simultaneously working on overlapping ranges.
*/ */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype, bool skip_hwpoisoned_pages) unsigned migratetype, int flags)
{ {
unsigned long pfn; unsigned long pfn;
unsigned long undo_pfn; unsigned long undo_pfn;
...@@ -199,7 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, ...@@ -199,7 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn += pageblock_nr_pages) { pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages); page = __first_valid_page(pfn, pageblock_nr_pages);
if (page && if (page &&
set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) { set_migratetype_isolate(page, migratetype, flags)) {
undo_pfn = pfn; undo_pfn = pfn;
goto undo; goto undo;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment