hugetlb: Convert to migrate_folio

This involves converting migrate_huge_page_move_mapping().  We also need a
folio variant of hugetlb_set_page_subpool(), but that's for a later patch.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
parent 3648951c
@@ -954,28 +954,33 @@ static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
return error; return error;
} }
#ifdef CONFIG_MIGRATION
/*
 * Migrate a hugetlb folio from @src to @dst for @mapping.
 *
 * Moves the page-cache entry, transfers the hugetlb subpool association
 * (the subpool tracks reserved huge pages per mount), and then either
 * copies the data or only the folio flags depending on @mode.
 *
 * Returns MIGRATEPAGE_SUCCESS on success, or a negative errno-style
 * code from migrate_huge_page_move_mapping() (e.g. -EAGAIN) on failure.
 */
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * Transfer the subpool pointer so reservation accounting follows
	 * the data to the new folio, and clear it on the source so the
	 * old folio is no longer charged against the subpool.
	 */
	if (hugetlb_page_subpool(&src->page)) {
		hugetlb_set_page_subpool(&dst->page,
					hugetlb_page_subpool(&src->page));
		hugetlb_set_page_subpool(&src->page, NULL);
	}

	/* MIGRATE_SYNC_NO_COPY: caller copies data itself; move flags only. */
	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif
static int hugetlbfs_error_remove_page(struct address_space *mapping, static int hugetlbfs_error_remove_page(struct address_space *mapping,
struct page *page) struct page *page)
@@ -1142,7 +1147,7 @@ static const struct address_space_operations hugetlbfs_aops = {
.write_begin = hugetlbfs_write_begin, .write_begin = hugetlbfs_write_begin,
.write_end = hugetlbfs_write_end, .write_end = hugetlbfs_write_end,
.dirty_folio = noop_dirty_folio, .dirty_folio = noop_dirty_folio,
.migratepage = hugetlbfs_migrate_page, .migrate_folio = hugetlbfs_migrate_folio,
.error_remove_page = hugetlbfs_error_remove_page, .error_remove_page = hugetlbfs_error_remove_page,
}; };
......
@@ -72,8 +72,8 @@ extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping, int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page); struct folio *dst, struct folio *src);
extern int migrate_page_move_mapping(struct address_space *mapping, extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count); struct page *newpage, struct page *page, int extra_count);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
...@@ -104,7 +104,7 @@ static inline void migrate_page_copy(struct page *newpage, ...@@ -104,7 +104,7 @@ static inline void migrate_page_copy(struct page *newpage,
struct page *page) {} struct page *page) {}
static inline int migrate_huge_page_move_mapping(struct address_space *mapping, static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page) struct folio *dst, struct folio *src)
{ {
return -ENOSYS; return -ENOSYS;
} }
......
@@ -474,26 +474,26 @@ EXPORT_SYMBOL(folio_migrate_mapping);
* of folio_migrate_mapping(). * of folio_migrate_mapping().
*/ */
int migrate_huge_page_move_mapping(struct address_space *mapping, int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page) struct folio *dst, struct folio *src)
{ {
XA_STATE(xas, &mapping->i_pages, page_index(page)); XA_STATE(xas, &mapping->i_pages, folio_index(src));
int expected_count; int expected_count;
xas_lock_irq(&xas); xas_lock_irq(&xas);
expected_count = 2 + page_has_private(page); expected_count = 2 + folio_has_private(src);
if (!page_ref_freeze(page, expected_count)) { if (!folio_ref_freeze(src, expected_count)) {
xas_unlock_irq(&xas); xas_unlock_irq(&xas);
return -EAGAIN; return -EAGAIN;
} }
newpage->index = page->index; dst->index = src->index;
newpage->mapping = page->mapping; dst->mapping = src->mapping;
get_page(newpage); folio_get(dst);
xas_store(&xas, newpage); xas_store(&xas, dst);
page_ref_unfreeze(page, expected_count - 1); folio_ref_unfreeze(src, expected_count - 1);
xas_unlock_irq(&xas); xas_unlock_irq(&xas);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment