Commit e8db67eb authored by Naoya Horiguchi's avatar Naoya Horiguchi Committed by Linus Torvalds

mm: migrate: move_pages() supports thp migration

This patch enables thp migration for move_pages(2).

Link: http://lkml.kernel.org/r/20170717193955.20207-10-zi.yan@sent.com
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c8633798
...@@ -185,8 +185,8 @@ void putback_movable_pages(struct list_head *l) ...@@ -185,8 +185,8 @@ void putback_movable_pages(struct list_head *l)
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
} else { } else {
dec_node_page_state(page, NR_ISOLATED_ANON + mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
page_is_file_cache(page)); page_is_file_cache(page), -hpage_nr_pages(page));
putback_lru_page(page); putback_lru_page(page);
} }
} }
...@@ -1146,8 +1146,8 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page, ...@@ -1146,8 +1146,8 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
* as __PageMovable * as __PageMovable
*/ */
if (likely(!__PageMovable(page))) if (likely(!__PageMovable(page)))
dec_node_page_state(page, NR_ISOLATED_ANON + mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
page_is_file_cache(page)); page_is_file_cache(page), -hpage_nr_pages(page));
} }
/* /*
...@@ -1421,7 +1421,17 @@ static struct page *new_page_node(struct page *p, unsigned long private, ...@@ -1421,7 +1421,17 @@ static struct page *new_page_node(struct page *p, unsigned long private,
if (PageHuge(p)) if (PageHuge(p))
return alloc_huge_page_node(page_hstate(compound_head(p)), return alloc_huge_page_node(page_hstate(compound_head(p)),
pm->node); pm->node);
else else if (thp_migration_supported() && PageTransHuge(p)) {
struct page *thp;
thp = alloc_pages_node(pm->node,
(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
} else
return __alloc_pages_node(pm->node, return __alloc_pages_node(pm->node,
GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
} }
...@@ -1448,6 +1458,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm, ...@@ -1448,6 +1458,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
for (pp = pm; pp->node != MAX_NUMNODES; pp++) { for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct page *page; struct page *page;
struct page *head;
unsigned int follflags;
err = -EFAULT; err = -EFAULT;
vma = find_vma(mm, pp->addr); vma = find_vma(mm, pp->addr);
...@@ -1455,8 +1467,10 @@ static int do_move_page_to_node_array(struct mm_struct *mm, ...@@ -1455,8 +1467,10 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto set_status; goto set_status;
/* FOLL_DUMP to ignore special (like zero) pages */ /* FOLL_DUMP to ignore special (like zero) pages */
page = follow_page(vma, pp->addr, follflags = FOLL_GET | FOLL_DUMP;
FOLL_GET | FOLL_SPLIT | FOLL_DUMP); if (!thp_migration_supported())
follflags |= FOLL_SPLIT;
page = follow_page(vma, pp->addr, follflags);
err = PTR_ERR(page); err = PTR_ERR(page);
if (IS_ERR(page)) if (IS_ERR(page))
...@@ -1466,7 +1480,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm, ...@@ -1466,7 +1480,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
if (!page) if (!page)
goto set_status; goto set_status;
pp->page = page;
err = page_to_nid(page); err = page_to_nid(page);
if (err == pp->node) if (err == pp->node)
...@@ -1481,16 +1494,22 @@ static int do_move_page_to_node_array(struct mm_struct *mm, ...@@ -1481,16 +1494,22 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto put_and_set; goto put_and_set;
if (PageHuge(page)) { if (PageHuge(page)) {
if (PageHead(page)) if (PageHead(page)) {
isolate_huge_page(page, &pagelist); isolate_huge_page(page, &pagelist);
err = 0;
pp->page = page;
}
goto put_and_set; goto put_and_set;
} }
err = isolate_lru_page(page); pp->page = compound_head(page);
head = compound_head(page);
err = isolate_lru_page(head);
if (!err) { if (!err) {
list_add_tail(&page->lru, &pagelist); list_add_tail(&head->lru, &pagelist);
inc_node_page_state(page, NR_ISOLATED_ANON + mod_node_page_state(page_pgdat(head),
page_is_file_cache(page)); NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
} }
put_and_set: put_and_set:
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment