Commit de466bd6 authored by Mel Gorman, committed by Linus Torvalds

mm: numa: avoid unnecessary disruption of NUMA hinting during migration

do_huge_pmd_numa_page() handles the case where there is a parallel THP
migration.  However, by the time that check is made the NUMA hinting
information has already been disrupted.  This patch adds an earlier check,
with some helpers, so the fault path can back off before the hinting
information is touched.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1667918b
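Before the diff itself, a minimal userspace model of the pattern the patch introduces may help. This is an illustrative sketch only, not kernel code: every name in it (fake_page, trans_migrating(), wait_migrate(), fault_path(), migrator()) is invented for the example, with pthreads standing in for the page lock that migration holds. The point is the ordering: the fault path checks for a parallel migration before it touches the hinting state, and waits instead of clobbering it.

/*
 * Userspace model of the ordering fix, for illustration only.
 * A "migrator" thread holds the (modeled) page lock while a migration
 * is in flight; the fault path checks for that BEFORE touching the
 * hinting state.  Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_page {
	pthread_mutex_t lock;      /* protects the fields below */
	pthread_cond_t  unlocked;  /* models wait_on_page_locked() */
	bool locked;               /* models PageLocked() */
	int  hinting_state;        /* models the NUMA hinting information */
};

/* Models pmd_trans_migrating(): a deliberately racy, advisory read,
 * just like the kernel's check, which is re-validated later. */
static bool trans_migrating(struct fake_page *p)
{
	return p->locked;
}

/* Models wait_migrate_huge_page(): block until the migrator is done. */
static void wait_migrate(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->locked)
		pthread_cond_wait(&p->unlocked, &p->lock);
	pthread_mutex_unlock(&p->lock);
}

/* The fixed fault path: back off early, leave hinting_state intact. */
static void fault_path(struct fake_page *p)
{
	if (trans_migrating(p)) {
		wait_migrate(p);
		return;           /* caller would retry the fault later */
	}
	p->hinting_state = 0;     /* only disrupted when no migration runs */
}

static void *migrator(void *arg)
{
	struct fake_page *p = arg;

	pthread_mutex_lock(&p->lock);
	p->locked = true;         /* "lock the page" for migration */
	pthread_mutex_unlock(&p->lock);

	usleep(10000);            /* migration in progress */

	pthread_mutex_lock(&p->lock);
	p->locked = false;        /* migration done, "unlock the page" */
	pthread_cond_broadcast(&p->unlocked);
	pthread_mutex_unlock(&p->lock);
	return NULL;
}

int main(void)
{
	struct fake_page p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.unlocked = PTHREAD_COND_INITIALIZER,
		.locked = false,
		.hinting_state = 42,
	};
	pthread_t t;

	pthread_create(&t, NULL, migrator, &p);
	usleep(1000);             /* let the migrator take the "page lock" */
	fault_path(&p);           /* waits instead of wiping the state */
	pthread_join(&t, NULL);
	/* prints 42 when the fault observed the in-flight migration
	 * (timing-dependent, as toy models are) */
	printf("hinting_state = %d\n", p.hinting_state);
	return 0;
}

As in the patch, the check is advisory: it is read without the lock and can race with a migration that starts immediately afterwards, which is why do_huge_pmd_numa_page() below still re-validates under the page lock ("Migration could have started since the pmd_trans_migrating check").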
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -90,10 +90,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
 extern int migrate_misplaced_page(struct page *page,
 				  struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+	return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
 static inline int migrate_misplaced_page(struct page *page,
 					 struct vm_area_struct *vma, int node)
 {
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		ret = 0;
 		goto out_unlock;
 	}
+
+	/* mmap_sem prevents this happening but warn if that changes */
+	WARN_ON(pmd_trans_migrating(pmd));
+
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
 		spin_unlock(src_ptl);
@@ -1299,6 +1303,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
+	/*
+	 * If there are potential migrations, wait for completion and retry
+	 * without disrupting NUMA hinting information. Do not relock and
+	 * check_same as the page may no longer be mapped.
+	 */
+	if (unlikely(pmd_trans_migrating(*pmdp))) {
+		spin_unlock(ptl);
+		wait_migrate_huge_page(vma->anon_vma, pmdp);
+		goto out;
+	}
+
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
@@ -1329,12 +1344,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 	}
 
-	/*
-	 * If there are potential migrations, wait for completion and retry. We
-	 * do not relock and check_same as the page may no longer be mapped.
-	 * Furtermore, even if the page is currently misplaced, there is no
-	 * guarantee it is still misplaced after the migration completes.
-	 */
+	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1655,6 +1655,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 	return 1;
 }
 
+bool pmd_trans_migrating(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+	return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+	struct page *page = pmd_page(*pmd);
+	wait_on_page_locked(page);
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node.  Caller is expected to have an elevated reference count on
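A note on why these helpers suffice (my reading of the patch, not text from the commit): THP migration holds the page lock on the huge page for the duration of the move, so PageLocked() on pmd_page() is a cheap proxy for "a migration may be in progress". The test can yield false positives, since pages are locked for other reasons too, but a false positive only costs one wait-and-retry of the fault. A migration that begins after the check is still caught by the existing page-lock recheck in do_huge_pmd_numa_page(), which is what the "Migration could have started since the pmd_trans_migrating check" comment above refers to.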