Commit 4491f712 authored by Naoya Horiguchi, committed by Linus Torvalds

mm/memory-failure: set PageHWPoison before migrate_pages()

Now page freeing code doesn't consider PageHWPoison as a bad page, so by
setting it before completing the page containment, we can prevent the
error page from being reused just after successful page migration.

I added TTU_IGNORE_HWPOISON for try_to_unmap() to make sure that the
page table entry is transformed into a migration entry, not into a
hwpoison entry.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Dean Nelson <dnelson@redhat.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f4c18e6f
@@ -1659,6 +1659,8 @@ static int __soft_offline_page(struct page *page, int flags)
 		inc_zone_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
+		if (!TestSetPageHWPoison(page))
+			atomic_long_inc(&num_poisoned_pages);
 		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
 					MIGRATE_SYNC, MR_MEMORY_FAILURE);
 		if (ret) {
@@ -1673,9 +1675,8 @@ static int __soft_offline_page(struct page *page, int flags)
 				pfn, ret, page->flags);
 			if (ret > 0)
 				ret = -EIO;
-		} else {
-			if (!TestSetPageHWPoison(page))
-				atomic_long_inc(&num_poisoned_pages);
+			if (TestClearPageHWPoison(page))
+				atomic_long_dec(&num_poisoned_pages);
 		}
 	} else {
 		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+			TTU_IGNORE_HWPOISON);
 		page_was_mapped = 1;
 	}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment