Commit f0f44638 authored by Pavel Tatashin, committed by Linus Torvalds

mm/gup: return an error on migration failure

When migration failure occurs, we still pin pages, which means that we
may pin CMA movable pages which should never be the case.

Instead return an error without pinning pages when migration failure
happens.

No need to retry migrating, because migrate_pages() already retries 10
times.

Link: https://lkml.kernel.org/r/20210215161349.246722-4-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 83c02c23
@@ -1610,7 +1610,6 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 {
 	unsigned long i;
 	bool drain_allow = true;
-	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
 	struct page *prev_head, *head;
@@ -1661,17 +1660,15 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
-		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
-			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
-			/*
-			 * some of the pages failed migration. Do get_user_pages
-			 * without migration.
-			 */
-			migrate_allow = false;
+		ret = migrate_pages(&cma_page_list, alloc_migration_target,
+				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
+				    MR_CONTIG_RANGE);
+		if (ret) {
 			if (!list_empty(&cma_page_list))
 				putback_movable_pages(&cma_page_list);
+			return ret > 0 ? -ENOMEM : ret;
 		}
 		/*
 		 * We did migrate all the pages, Try to get the page references
 		 * again migrating any new CMA pages which we failed to isolate
@@ -1681,7 +1678,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 					   pages, vmas, NULL,
 					   gup_flags);
-		if ((ret > 0) && migrate_allow) {
+		if (ret > 0) {
 			nr_pages = ret;
 			drain_allow = true;
 			goto check_again;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment