Commit c8633798 authored by Naoya Horiguchi, committed by Linus Torvalds

mm: mempolicy: mbind and migrate_pages support thp migration

This patch enables thp migration for mbind(2) and migrate_pages(2).
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ab6e3d09
@@ -97,6 +97,7 @@
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
...@@ -426,41 +427,69 @@ static inline bool queue_pages_required(struct page *page, ...@@ -426,41 +427,69 @@ static inline bool queue_pages_required(struct page *page,
return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
} }
/* static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
*/
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk) unsigned long end, struct mm_walk *walk)
{ {
struct vm_area_struct *vma = walk->vma; int ret = 0;
struct page *page; struct page *page;
struct queue_pages *qp = walk->private; struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags; unsigned long flags;
int nid, ret;
pte_t *pte;
spinlock_t *ptl;
if (pmd_trans_huge(*pmd)) { if (unlikely(is_pmd_migration_entry(*pmd))) {
ptl = pmd_lock(walk->mm, pmd); ret = 1;
if (pmd_trans_huge(*pmd)) { goto unlock;
}
page = pmd_page(*pmd); page = pmd_page(*pmd);
if (is_huge_zero_page(page)) { if (is_huge_zero_page(page)) {
spin_unlock(ptl); spin_unlock(ptl);
__split_huge_pmd(vma, pmd, addr, false, NULL); __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
} else { goto out;
}
if (!thp_migration_supported()) {
get_page(page); get_page(page);
spin_unlock(ptl); spin_unlock(ptl);
lock_page(page); lock_page(page);
ret = split_huge_page(page); ret = split_huge_page(page);
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
if (ret) goto out;
return 0;
} }
} else { if (!queue_pages_required(page, qp)) {
spin_unlock(ptl); ret = 1;
goto unlock;
} }
ret = 1;
flags = qp->flags;
/* go to thp migration */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
migrate_page_add(page, qp->pagelist, flags);
unlock:
spin_unlock(ptl);
out:
return ret;
}
/*
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
*/
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
struct page *page;
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
int ret;
pte_t *pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
if (ret)
return 0;
} }
if (pmd_trans_unstable(pmd)) if (pmd_trans_unstable(pmd))
...@@ -481,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, ...@@ -481,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
continue; continue;
if (!queue_pages_required(page, qp)) if (!queue_pages_required(page, qp))
continue; continue;
if (PageTransCompound(page)) { if (PageTransCompound(page) && !thp_migration_supported()) {
get_page(page); get_page(page);
pte_unmap_unlock(pte, ptl); pte_unmap_unlock(pte, ptl);
lock_page(page); lock_page(page);
...@@ -893,19 +922,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, ...@@ -893,19 +922,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
/* /*
* page migration * page migration, thp tail pages can be passed.
*/ */
static void migrate_page_add(struct page *page, struct list_head *pagelist, static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags) unsigned long flags)
{ {
struct page *head = compound_head(page);
/* /*
* Avoid migrating a page that is shared with others. * Avoid migrating a page that is shared with others.
*/ */
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
if (!isolate_lru_page(page)) { if (!isolate_lru_page(head)) {
list_add_tail(&page->lru, pagelist); list_add_tail(&head->lru, pagelist);
inc_node_page_state(page, NR_ISOLATED_ANON + mod_node_page_state(page_pgdat(head),
page_is_file_cache(page)); NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
} }
} }
} }
...@@ -915,7 +946,17 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x ...@@ -915,7 +946,17 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
if (PageHuge(page)) if (PageHuge(page))
return alloc_huge_page_node(page_hstate(compound_head(page)), return alloc_huge_page_node(page_hstate(compound_head(page)),
node); node);
else else if (thp_migration_supported() && PageTransHuge(page)) {
struct page *thp;
thp = alloc_pages_node(node,
(GFP_TRANSHUGE | __GFP_THISNODE),
HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
} else
return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE | return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE, 0); __GFP_THISNODE, 0);
} }
...@@ -1081,6 +1122,15 @@ static struct page *new_page(struct page *page, unsigned long start, int **x) ...@@ -1081,6 +1122,15 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
if (PageHuge(page)) { if (PageHuge(page)) {
BUG_ON(!vma); BUG_ON(!vma);
return alloc_huge_page_noerr(vma, address, 1); return alloc_huge_page_noerr(vma, address, 1);
} else if (thp_migration_supported() && PageTransHuge(page)) {
struct page *thp;
thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
} }
/* /*
* if !vma, alloc_page_vma() will use task or system default policy * if !vma, alloc_page_vma() will use task or system default policy
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment