Commit 049b2604 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert soft_offline_in_use_page() to use a folio

Replace the existing head-page logic with folio logic.

Link: https://lkml.kernel.org/r/20231108182809.602073-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 19369d86
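
Background for readers new to folios (not part of the commit message): compound_head() maps a possibly-tail page to its head page, while page_folio() returns the struct folio containing the page, so flag tests, locking and refcounting can be done once against the whole allocation. Below is a minimal sketch of the mapping this patch applies, assuming normal kernel context (linux/mm.h, linux/pagemap.h); the function name is hypothetical:

    /*
     * Illustrative sketch only, not part of this commit: the
     * head-page -> folio API mapping applied in the diff below.
     */
    static void folio_api_example(struct page *page)
    {
            /* old: struct page *hpage = compound_head(page); */
            struct folio *folio = page_folio(page);

            folio_lock(folio);              /* old: lock_page(page) */
            folio_wait_writeback(folio);    /* old: wait_on_page_writeback(page) */
            folio_unlock(folio);            /* old: unlock_page(page) */
            folio_put(folio);               /* old: put_page(page) */
    }

One behavioural nuance: folio_test_large() is true for any multi-page folio, hugetlb included, whereas the old code tested PageTransHuge(); the existing "!huge" guard is what keeps hugetlb folios out of the THP-split path in both versions.
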
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2645,40 +2645,40 @@ static int soft_offline_in_use_page(struct page *page)
 {
 	long ret = 0;
 	unsigned long pfn = page_to_pfn(page);
-	struct page *hpage = compound_head(page);
+	struct folio *folio = page_folio(page);
 	char const *msg_page[] = {"page", "hugepage"};
-	bool huge = PageHuge(page);
+	bool huge = folio_test_hugetlb(folio);
 	LIST_HEAD(pagelist);
 	struct migration_target_control mtc = {
 		.nid = NUMA_NO_NODE,
 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 	};
 
-	if (!huge && PageTransHuge(hpage)) {
+	if (!huge && folio_test_large(folio)) {
 		if (try_to_split_thp_page(page)) {
 			pr_info("soft offline: %#lx: thp split failed\n", pfn);
 			return -EBUSY;
 		}
-		hpage = page;
+		folio = page_folio(page);
 	}
 
-	lock_page(page);
+	folio_lock(folio);
 	if (!huge)
-		wait_on_page_writeback(page);
+		folio_wait_writeback(folio);
 	if (PageHWPoison(page)) {
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
 		return 0;
 	}
 
-	if (!huge && PageLRU(page) && !PageSwapCache(page))
+	if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
 		/*
 		 * Try to invalidate first. This should work for
 		 * non dirty unmapped page cache pages.
 		 */
-		ret = invalidate_inode_page(page);
-	unlock_page(page);
+		ret = mapping_evict_folio(folio_mapping(folio), folio);
+	folio_unlock(folio);
 
 	if (ret) {
 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
@@ -2686,7 +2686,7 @@ static int soft_offline_in_use_page(struct page *page)
 		return 0;
 	}
 
-	if (isolate_page(hpage, &pagelist)) {
+	if (isolate_page(&folio->page, &pagelist)) {
 		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
 		if (!ret) {
...
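
One replacement above is not purely mechanical: invalidate_inode_page(page) becomes mapping_evict_folio(folio_mapping(folio), folio). Earlier patches in this same series reduce invalidate_inode_page() to a thin wrapper, roughly the shape below (an assumption based on the surrounding series, not text from this commit), so calling mapping_evict_folio() directly just drops a redundant page-to-folio conversion:

    /* Assumed wrapper shape at this point in the series; illustrative only. */
    long invalidate_inode_page(struct page *page)
    {
            struct folio *folio = page_folio(page);

            return mapping_evict_folio(folio_mapping(folio), folio);
    }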