Commit f714f4f2 authored by Mel Gorman, committed by Linus Torvalds

mm: numa: call MMU notifiers on THP migration

MMU notifiers must be called on THP page migration or secondary MMUs
will get very confused.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2b4847e7
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/hugetlb_cgroup.h> #include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/balloon_compaction.h> #include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
...@@ -1716,12 +1717,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ...@@ -1716,12 +1717,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
struct page *page, int node) struct page *page, int node)
{ {
spinlock_t *ptl; spinlock_t *ptl;
unsigned long haddr = address & HPAGE_PMD_MASK;
pg_data_t *pgdat = NODE_DATA(node); pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0; int isolated = 0;
struct page *new_page = NULL; struct page *new_page = NULL;
struct mem_cgroup *memcg = NULL; struct mem_cgroup *memcg = NULL;
int page_lru = page_is_file_cache(page); int page_lru = page_is_file_cache(page);
unsigned long mmun_start = address & HPAGE_PMD_MASK;
unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
pmd_t orig_entry; pmd_t orig_entry;
/* /*
...@@ -1756,10 +1758,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ...@@ -1756,10 +1758,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
WARN_ON(PageLRU(new_page)); WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */ /* Recheck the target PMD */
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
ptl = pmd_lock(mm, pmd); ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
fail_putback: fail_putback:
spin_unlock(ptl); spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Reverse changes made by migrate_page_copy() */ /* Reverse changes made by migrate_page_copy() */
if (TestClearPageActive(new_page)) if (TestClearPageActive(new_page))
...@@ -1800,15 +1804,16 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ...@@ -1800,15 +1804,16 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
* The SetPageUptodate on the new page and page_add_new_anon_rmap * The SetPageUptodate on the new page and page_add_new_anon_rmap
* guarantee the copy is visible before the pagetable update. * guarantee the copy is visible before the pagetable update.
*/ */
flush_cache_range(vma, haddr, haddr + HPAGE_PMD_SIZE); flush_cache_range(vma, mmun_start, mmun_end);
page_add_new_anon_rmap(new_page, vma, haddr); page_add_new_anon_rmap(new_page, vma, mmun_start);
pmdp_clear_flush(vma, haddr, pmd); pmdp_clear_flush(vma, mmun_start, pmd);
set_pmd_at(mm, haddr, pmd, entry); set_pmd_at(mm, mmun_start, pmd, entry);
flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry); update_mmu_cache_pmd(vma, address, &entry);
if (page_count(page) != 2) { if (page_count(page) != 2) {
set_pmd_at(mm, haddr, pmd, orig_entry); set_pmd_at(mm, mmun_start, pmd, orig_entry);
flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry); update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(new_page); page_remove_rmap(new_page);
goto fail_putback; goto fail_putback;
...@@ -1823,6 +1828,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ...@@ -1823,6 +1828,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/ */
mem_cgroup_end_migration(memcg, page, new_page, true); mem_cgroup_end_migration(memcg, page, new_page, true);
spin_unlock(ptl); spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(new_page); unlock_page(new_page);
unlock_page(page); unlock_page(page);
...@@ -1843,7 +1849,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, ...@@ -1843,7 +1849,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
ptl = pmd_lock(mm, pmd); ptl = pmd_lock(mm, pmd);
if (pmd_same(*pmd, entry)) { if (pmd_same(*pmd, entry)) {
entry = pmd_mknonnuma(entry); entry = pmd_mknonnuma(entry);
set_pmd_at(mm, haddr, pmd, entry); set_pmd_at(mm, mmun_start, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry); update_mmu_cache_pmd(vma, address, &entry);
} }
spin_unlock(ptl); spin_unlock(ptl);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment