Commit d1e153fe authored by Pavel Tatashin, committed by Linus Torvalds

mm/gup: migrate pinned pages out of movable zone

We should not pin pages in ZONE_MOVABLE.  Currently, the only movable
pages we avoid pinning are CMA pages.  Generalize the function that
migrates CMA pages to migrate all movable pages.  Use is_pinnable_page()
to check which pages need to be migrated.

Link: https://lkml.kernel.org/r/20210215161349.246722-10-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9afaf30f
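The patch hinges on the is_pinnable_page() predicate referenced in the
commit message, introduced earlier in this series. A minimal sketch of
what it checks, assuming it lives with the other page helpers in
include/linux/mm.h (a hedged reconstruction, not necessarily the exact
upstream body):

/*
 * Sketch: ZONE_MOVABLE and CMA pages must remain migratable, so they
 * are not safe to long-term pin.
 */
static inline bool is_pinnable_page(struct page *page)
{
	return !(page_zonenum(page) == ZONE_MOVABLE ||
		 is_migrate_cma_page(page));
}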
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -27,6 +27,7 @@ enum migrate_reason {
 	MR_MEMPOLICY_MBIND,
 	MR_NUMA_MISPLACED,
 	MR_CONTIG_RANGE,
+	MR_LONGTERM_PIN,
 	MR_TYPES
 };

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -407,8 +407,13 @@ enum zone_type {
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
-	 *    essentially turn such pages unmovable. Memory offlining might
-	 *    retry a long time.
+	 *    essentially turn such pages unmovable. Therefore, we do not allow
+	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
+	 *    faulted, they come from the right zone right away. However, it is
+	 *    still possible that the address space already has pages in
+	 *    ZONE_MOVABLE at the time they are pinned (i.e. the user has
+	 *    touched that memory before pinning). In that case we migrate them
+	 *    to a different zone. When migration fails, pinning fails.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.

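The "come from the right zone right away" part of the comment above
refers to PF_MEMALLOC_PIN, set via memalloc_pin_save() elsewhere in this
series: while it is set, page allocations skip ZONE_MOVABLE. A minimal
sketch of the resulting pin path (gup_longterm_sketch() is a hypothetical
name, mirroring __gup_longterm_locked() as modified at the end of this
patch):

static long gup_longterm_sketch(struct mm_struct *mm, unsigned long start,
				unsigned long nr_pages, struct page **pages,
				struct vm_area_struct **vmas,
				unsigned int gup_flags)
{
	/* PF_MEMALLOC_PIN: allocations during fault-in avoid ZONE_MOVABLE */
	unsigned int flags = memalloc_pin_save();
	long rc;

	rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
				     NULL, gup_flags);
	/*
	 * Newly faulted pages are already pinnable; only pages the user
	 * touched before pinning can still sit in the wrong zone, and
	 * those are migrated here (or the pin fails).
	 */
	if (rc > 0)
		rc = check_and_migrate_movable_pages(mm, start, rc, pages,
						     vmas, gup_flags);
	memalloc_pin_restore(flags);
	return rc;
}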
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,8 @@
 	EM( MR_SYSCALL,		"syscall_or_cpuset")		\
 	EM( MR_MEMPOLICY_MBIND,	"mempolicy_mbind")		\
 	EM( MR_NUMA_MISPLACED,	"numa_misplaced")		\
-	EMe(MR_CONTIG_RANGE,	"contig_range")
+	EM( MR_CONTIG_RANGE,	"contig_range")			\
+	EMe(MR_LONGTERM_PIN,	"longterm_pin")

 /*
  * First define the enums in the above macros to be exported to userspace

--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,11 +87,12 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
 		int orig_refs = refs;

 		/*
-		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
-		 * path, so fail and let the caller fall back to the slow path.
+		 * Can't do FOLL_LONGTERM + FOLL_PIN gup in the fast path if
+		 * the page is not in the right zone, so fail and let the
+		 * caller fall back to the slow path.
 		 */
-		if (unlikely(flags & FOLL_LONGTERM) &&
-		    is_migrate_cma_page(page))
+		if (unlikely((flags & FOLL_LONGTERM) &&
+			     !is_pinnable_page(page)))
 			return NULL;

 		/*
@@ -1600,8 +1601,8 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */

-#ifdef CONFIG_CMA
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
+#ifdef CONFIG_MIGRATION
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
 					unsigned long start,
 					unsigned long nr_pages,
 					struct page **pages,
@@ -1610,7 +1611,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 {
 	unsigned long i, isolation_error_count;
 	bool drain_allow;
-	LIST_HEAD(cma_page_list);
+	LIST_HEAD(movable_page_list);
 	long ret = nr_pages;
 	struct page *prev_head, *head;
 	struct migration_target_control mtc = {
@@ -1628,13 +1629,12 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 			continue;
 		prev_head = head;
 		/*
-		 * If we get a page from the CMA zone, since we are going to
-		 * be pinning these entries, we might as well move them out
-		 * of the CMA zone if possible.
+		 * If we get a movable page, since we are going to be pinning
+		 * these entries, try to move them out if possible.
 		 */
-		if (is_migrate_cma_page(head)) {
+		if (!is_pinnable_page(head)) {
 			if (PageHuge(head)) {
-				if (!isolate_huge_page(head, &cma_page_list))
+				if (!isolate_huge_page(head, &movable_page_list))
 					isolation_error_count++;
 			} else {
 				if (!PageLRU(head) && drain_allow) {
@@ -1646,7 +1646,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 					isolation_error_count++;
 					continue;
 				}
-				list_add_tail(&head->lru, &cma_page_list);
+				list_add_tail(&head->lru, &movable_page_list);
 				mod_node_page_state(page_pgdat(head),
 						    NR_ISOLATED_ANON +
 						    page_is_file_lru(head),
@@ -1659,10 +1659,10 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 	 * If list is empty, and no isolation errors, means that all pages are
 	 * in the correct zone.
 	 */
-	if (list_empty(&cma_page_list) && !isolation_error_count)
+	if (list_empty(&movable_page_list) && !isolation_error_count)
 		return ret;

-	if (!list_empty(&cma_page_list)) {
+	if (!list_empty(&movable_page_list)) {
 		/*
 		 * drop the above get_user_pages reference.
 		 */
@@ -1672,12 +1672,12 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);

-		ret = migrate_pages(&cma_page_list, alloc_migration_target,
+		ret = migrate_pages(&movable_page_list, alloc_migration_target,
 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
-				    MR_CONTIG_RANGE);
+				    MR_LONGTERM_PIN);
 		if (ret) {
-			if (!list_empty(&cma_page_list))
-				putback_movable_pages(&cma_page_list);
+			if (!list_empty(&movable_page_list))
+				putback_movable_pages(&movable_page_list);
 			return ret > 0 ? -ENOMEM : ret;
 		}
@@ -1696,7 +1696,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 	goto check_again;
 }
 #else
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
 					unsigned long start,
 					unsigned long nr_pages,
 					struct page **pages,
@@ -1705,7 +1705,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 {
 	return nr_pages;
 }
-#endif /* CONFIG_CMA */
+#endif /* CONFIG_MIGRATION */

 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -1729,8 +1729,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 	if (gup_flags & FOLL_LONGTERM) {
 		if (rc > 0)
-			rc = check_and_migrate_cma_pages(mm, start, rc, pages,
-							 vmas, gup_flags);
+			rc = check_and_migrate_movable_pages(mm, start, rc,
+							     pages, vmas,
+							     gup_flags);
 		memalloc_pin_restore(flags);
 	}
 	return rc;
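From a caller's perspective nothing changes, except that long-term pins
now succeed only once every page is out of ZONE_MOVABLE. A hedged usage
sketch (pin_user_buffer() is a hypothetical helper; pin_user_pages_fast()
and unpin_user_pages() are the existing gup APIs):

/*
 * Long-term pin a user buffer.  After this commit, pages that were
 * faulted into ZONE_MOVABLE are migrated out before the pin succeeds,
 * and the pin fails (e.g. -ENOMEM) if that migration fails.
 */
static int pin_user_buffer(unsigned long uaddr, int npages,
			   struct page **pages)
{
	int rc = pin_user_pages_fast(uaddr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);

	if (rc < 0)
		return rc;
	if (rc != npages) {
		/* partial pin: release what we got and report failure */
		unpin_user_pages(pages, rc);
		return -EFAULT;
	}
	return 0;
}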