Commit 0dabec93 authored by Minchan Kim, committed by Linus Torvalds

mm: migration: clean up unmap_and_move()

unmap_and_move() is one big, messy function.  Clean it up.
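
The work divides into two functions: the actual migration steps move into
a new helper, __unmap_and_move(), while unmap_and_move() keeps the
allocation of the new page, the trivial-case filtering and the putback
bookkeeping.  In outline (a condensed sketch; the real bodies are in the
hunks below):

	/* Lock the page, remove its ptes and move it to newpage. */
	static int __unmap_and_move(struct page *page, struct page *newpage,
					int force, bool offlining, bool sync);

	/*
	 * Allocate newpage via get_new_page(), skip pages freed under us,
	 * split THPs, call __unmap_and_move(), then put back the old page
	 * (unless the result is -EAGAIN) and always put back newpage.
	 */
	static int unmap_and_move(new_page_t get_new_page, unsigned long private,
				struct page *page, int force, bool offlining,
				bool sync);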
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f80c0673
@@ -621,38 +621,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	return rc;
 }
 
-/*
- * Obtain the lock on page, remove all ptes and migrate the page
- * to the newly allocated page in newpage.
- */
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, bool offlining, bool sync)
+static int __unmap_and_move(struct page *page, struct page *newpage,
+				int force, bool offlining, bool sync)
 {
-	int rc = 0;
-	int *result = NULL;
-	struct page *newpage = get_new_page(page, private, &result);
+	int rc = -EAGAIN;
 	int remap_swapcache = 1;
 	int charge = 0;
 	struct mem_cgroup *mem;
 	struct anon_vma *anon_vma = NULL;
 
-	if (!newpage)
-		return -ENOMEM;
-
-	if (page_count(page) == 1) {
-		/* page was freed from under us. So we are done. */
-		goto move_newpage;
-	}
-	if (unlikely(PageTransHuge(page)))
-		if (unlikely(split_huge_page(page)))
-			goto move_newpage;
-
-	/* prepare cgroup just returns 0 or -ENOMEM */
-	rc = -EAGAIN;
-
 	if (!trylock_page(page)) {
 		if (!force || !sync)
-			goto move_newpage;
+			goto out;
 
 		/*
 		 * It's not safe for direct compaction to call lock_page.
@@ -668,7 +648,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		 * altogether.
 		 */
 		if (current->flags & PF_MEMALLOC)
-			goto move_newpage;
+			goto out;
 
 		lock_page(page);
 	}
@@ -785,8 +765,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 unlock:
 	unlock_page(page);
+out:
+	return rc;
+}
 
-move_newpage:
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+			struct page *page, int force, bool offlining, bool sync)
+{
+	int rc = 0;
+	int *result = NULL;
+	struct page *newpage = get_new_page(page, private, &result);
+
+	if (!newpage)
+		return -ENOMEM;
+
+	if (page_count(page) == 1) {
+		/* page was freed from under us. So we are done. */
+		goto out;
+	}
+
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page)))
+			goto out;
+
+	rc = __unmap_and_move(page, newpage, force, offlining, sync);
+out:
 	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
@@ -799,13 +806,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 				page_is_file_cache(page));
 		putback_lru_page(page);
 	}
-
 	/*
 	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
 	 */
 	putback_lru_page(newpage);
-
 	if (result) {
 		if (rc)
 			*result = rc;
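
For context on the return-value protocol this patch preserves: -EAGAIN
means "retry this page on a later pass", which is why __unmap_and_move()
now starts from rc = -EAGAIN and unmap_and_move() only puts the old page
back on a definitive result.  A condensed paraphrase of the migrate_pages()
retry loop from mm/migrate.c of this era (not part of this patch;
simplified here):

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();
			rc = unmap_and_move(get_new_page, private,
					page, pass > 2, offlining, sync);
			switch (rc) {
			case -ENOMEM:
				goto out;	/* no point trying further */
			case -EAGAIN:
				retry++;	/* try again on the next pass */
				break;
			case 0:
				break;		/* migrated successfully */
			default:
				nr_failed++;	/* permanent failure */
				break;
			}
		}
	}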