Commit 49f51859 authored by Huang Ying, committed by Andrew Morton

migrate: convert unmap_and_move() to use folios

Patch series "migrate: convert migrate_pages()/unmap_and_move() to use
folios", v2.

The conversion is quite straightforward: just replace the page API with the
corresponding folio API.  migrate_pages() and unmap_and_move() mostly work
with folios (head pages) only.


This patch (of 2):

Quite straightforward: the page functions are converted to the corresponding
folio functions, and the comments are updated to match.
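
For illustration, a minimal sketch of what that one-to-one replacement looks
like (not part of the patch; the helper name is hypothetical), using the folio
calls that appear in the diff below:

#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Hypothetical helper, for illustration only: each old page call has a
 * direct folio equivalent that operates on the head page.
 */
static void drop_folio_if_unused(struct folio *src)
{
	/* page_count(page) == 1       ->  folio_ref_count(src) == 1 */
	if (folio_ref_count(src) == 1) {
		/* ClearPageActive(page)      ->  folio_clear_active(src) */
		folio_clear_active(src);
		/* ClearPageUnevictable(page) ->  folio_clear_unevictable(src) */
		folio_clear_unevictable(src);
	}

	/* put_page(page)              ->  folio_put(src) */
	folio_put(src);
}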

Link: https://lkml.kernel.org/r/20221109012348.93849-1-ying.huang@intel.com
Link: https://lkml.kernel.org/r/20221109012348.93849-2-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 16fd6b31
@@ -1150,79 +1150,79 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 }
 
 /*
- * Obtain the lock on page, remove all ptes and migrate the page
- * to the newly allocated page in newpage.
+ * Obtain the lock on folio, remove all ptes and migrate the folio
+ * to the newly allocated folio in dst.
  */
 static int unmap_and_move(new_page_t get_new_page,
                           free_page_t put_new_page,
-                          unsigned long private, struct page *page,
+                          unsigned long private, struct folio *src,
                           int force, enum migrate_mode mode,
                           enum migrate_reason reason,
                           struct list_head *ret)
 {
-        struct folio *dst, *src = page_folio(page);
+        struct folio *dst;
         int rc = MIGRATEPAGE_SUCCESS;
         struct page *newpage = NULL;
 
-        if (!thp_migration_supported() && PageTransHuge(page))
+        if (!thp_migration_supported() && folio_test_transhuge(src))
                 return -ENOSYS;
 
-        if (page_count(page) == 1) {
-                /* Page was freed from under us. So we are done. */
-                ClearPageActive(page);
-                ClearPageUnevictable(page);
+        if (folio_ref_count(src) == 1) {
+                /* Folio was freed from under us. So we are done. */
+                folio_clear_active(src);
+                folio_clear_unevictable(src);
                 /* free_pages_prepare() will clear PG_isolated. */
                 goto out;
         }
 
-        newpage = get_new_page(page, private);
+        newpage = get_new_page(&src->page, private);
         if (!newpage)
                 return -ENOMEM;
         dst = page_folio(newpage);
 
-        newpage->private = 0;
+        dst->private = 0;
         rc = __unmap_and_move(src, dst, force, mode);
         if (rc == MIGRATEPAGE_SUCCESS)
-                set_page_owner_migrate_reason(newpage, reason);
+                set_page_owner_migrate_reason(&dst->page, reason);
 
 out:
         if (rc != -EAGAIN) {
                 /*
-                 * A page that has been migrated has all references
-                 * removed and will be freed. A page that has not been
+                 * A folio that has been migrated has all references
+                 * removed and will be freed. A folio that has not been
                  * migrated will have kept its references and be restored.
                  */
-                list_del(&page->lru);
+                list_del(&src->lru);
         }
 
         /*
         * If migration is successful, releases reference grabbed during
-        * isolation. Otherwise, restore the page to right list unless
+        * isolation. Otherwise, restore the folio to right list unless
         * we want to retry.
         */
         if (rc == MIGRATEPAGE_SUCCESS) {
                 /*
-                 * Compaction can migrate also non-LRU pages which are
+                 * Compaction can migrate also non-LRU folios which are
                  * not accounted to NR_ISOLATED_*. They can be recognized
-                 * as __PageMovable
+                 * as __folio_test_movable
                  */
-                if (likely(!__PageMovable(page)))
-                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                        page_is_file_lru(page), -thp_nr_pages(page));
+                if (likely(!__folio_test_movable(src)))
+                        mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
+                                        folio_is_file_lru(src), -folio_nr_pages(src));
 
                 if (reason != MR_MEMORY_FAILURE)
                         /*
-                         * We release the page in page_handle_poison.
+                         * We release the folio in page_handle_poison.
                          */
-                        put_page(page);
+                        folio_put(src);
         } else {
                 if (rc != -EAGAIN)
-                        list_add_tail(&page->lru, ret);
+                        list_add_tail(&src->lru, ret);
 
                 if (put_new_page)
-                        put_new_page(newpage, private);
+                        put_new_page(&dst->page, private);
                 else
-                        put_page(newpage);
+                        folio_put(dst);
         }
 
         return rc;
@@ -1459,7 +1459,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                                                         &ret_pages);
                 else
                         rc = unmap_and_move(get_new_page, put_new_page,
-                                            private, page, pass > 2, mode,
+                                            private, page_folio(page), pass > 2, mode,
                                             reason, &ret_pages);
                 /*
                  * The rules are: