Commit 587fe586 authored by Mel Gorman, committed by Ingo Molnar

mm: Prevent parallel splits during THP migration

THP migrations are serialised by the page lock, but on its own that does
not prevent THP splits. If the page is split during THP migration, the
pmd_same checks will prevent page table corruption, but the page unlock
and the other fix-ups can still cause corruption. This patch takes the
anon_vma lock to prevent parallel splits during migration.
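
The fix amounts to a lock-ordering rule: the NUMA hinting fault path holds the
page lock and then the anon_vma lock in read mode before trusting its
pmd_same() recheck, while a THP split must take the anon_vma lock in write
mode and is therefore excluded. Below is a minimal userspace sketch of that
pattern; it is not kernel code, and the pthread locks and the pmd_val
variable are stand-ins assumed for illustration (roughly lock_page(), the
anon_vma rwsem, mm->page_table_lock and *pmdp).

/*
 * Userspace sketch only: pthread locks model the kernel locks, they are
 * not the real APIs.  page_lock ~ lock_page(), anon_vma_lock ~ the
 * anon_vma rwsem, page_table_lock ~ mm->page_table_lock, pmd_val ~ *pmdp.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t anon_vma_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pmd_val = 42;

/* Hinting-fault/migration path: page lock, then anon_vma lock for reading. */
static void *migrate_path(void *arg)
{
        unsigned long saved_pmd;
        (void)arg;

        pthread_mutex_lock(&page_table_lock);
        saved_pmd = pmd_val;                    /* snapshot, like the pmd argument */
        pthread_mutex_unlock(&page_table_lock);

        pthread_mutex_lock(&page_lock);         /* serialise THP migrations */
        pthread_rwlock_rdlock(&anon_vma_lock);  /* exclude parallel splits */

        pthread_mutex_lock(&page_table_lock);
        if (saved_pmd == pmd_val)               /* the pmd_same()-style recheck */
                printf("pmd unchanged: safe to do the post-migration fix-ups\n");
        else
                printf("pmd changed: back out and retry the fault\n");
        pthread_mutex_unlock(&page_table_lock);

        pthread_rwlock_unlock(&anon_vma_lock);
        pthread_mutex_unlock(&page_lock);
        return NULL;
}

/* Split path: needs the anon_vma lock for writing, so it cannot interleave. */
static void *split_path(void *arg)
{
        (void)arg;

        pthread_rwlock_wrlock(&anon_vma_lock);
        pthread_mutex_lock(&page_table_lock);
        pmd_val = 0;                            /* a split rewrites the page tables */
        pthread_mutex_unlock(&page_table_lock);
        pthread_rwlock_unlock(&anon_vma_lock);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, migrate_path, NULL);
        pthread_create(&t2, NULL, split_path, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

Because the split path needs the write side of the lock, it can only run
entirely before the migration path takes the read lock (in which case the
recheck sees the changed pmd and backs out) or after the read lock is
dropped, never in the middle of the fix-ups.
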
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: <stable@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-7-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 42836f5f
mm/huge_memory.c
@@ -1278,18 +1278,18 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
 {
+	struct anon_vma *anon_vma = NULL;
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	int target_nid;
 	int current_nid = -1;
-	bool migrated;
+	bool migrated, page_locked;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
 	page = pmd_page(pmd);
-	get_page(page);
 	current_nid = page_to_nid(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
 	if (current_nid == numa_node_id())
@@ -1299,12 +1299,29 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Acquire the page lock to serialise THP migrations but avoid dropping
 	 * page_table_lock if at all possible
 	 */
-	if (trylock_page(page))
-		goto got_lock;
+	page_locked = trylock_page(page);
+	target_nid = mpol_misplaced(page, vma, haddr);
+	if (target_nid == -1) {
+		/* If the page was locked, there are no parallel migrations */
+		if (page_locked) {
+			unlock_page(page);
+			goto clear_pmdnuma;
+		}
 
-	/* Serialise against migrationa and check placement check placement */
+		/* Otherwise wait for potential migrations and retry fault */
+		spin_unlock(&mm->page_table_lock);
+		wait_on_page_locked(page);
+		goto out;
+	}
+
+	/* Page is misplaced, serialise migrations and parallel THP splits */
+	get_page(page);
 	spin_unlock(&mm->page_table_lock);
-	lock_page(page);
+	if (!page_locked) {
+		lock_page(page);
+		page_locked = true;
+	}
+	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PTE did not change while locked */
 	spin_lock(&mm->page_table_lock);
@@ -1314,14 +1331,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 	}
 
-got_lock:
-	target_nid = mpol_misplaced(page, vma, haddr);
-	if (target_nid == -1) {
-		unlock_page(page);
-		put_page(page);
-		goto clear_pmdnuma;
-	}
-
 	/* Migrate the THP to the requested node */
 	spin_unlock(&mm->page_table_lock);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
@@ -1330,6 +1339,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto check_same;
 
 	task_numa_fault(target_nid, HPAGE_PMD_NR, true);
+	if (anon_vma)
+		page_unlock_anon_vma_read(anon_vma);
 	return 0;
 
 check_same:
@@ -1346,6 +1357,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	update_mmu_cache_pmd(vma, addr, pmdp);
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
+
+out:
+	if (anon_vma)
+		page_unlock_anon_vma_read(anon_vma);
+
 	if (current_nid != -1)
 		task_numa_fault(current_nid, HPAGE_PMD_NR, false);
 	return 0;