Commit 6aa3a920 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert isolate_hugetlb to folios

Patch series "continue hugetlb folio conversion", v3.

This series continues the conversion of core hugetlb functions to use
folios. It converts many helper functions in the hugetlb fault path,
in preparation for a later series converting the hugetlb fault code
paths themselves to operate on folios.


This patch (of 8):

Convert isolate_hugetlb() to take in a folio and convert its callers to
pass a folio.  Using page_folio() to convert the callers is safe, as
isolate_hugetlb() only operates on head pages.
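
As an illustration of the call-site pattern (a minimal sketch based on the
diff below, not code from the patch; `page` and `pagelist` stand in for
whatever a given caller holds):

	/* Before: isolate_hugetlb() took a head page directly. */
	err = isolate_hugetlb(page, pagelist);

	/* After: it takes a folio.  Since hugetlb callers hold a head
	 * page, page_folio() simply returns the enclosing folio. */
	err = isolate_hugetlb(page_folio(page), pagelist);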

Link: https://lkml.kernel.org/r/20230113223057.173292-1-sidhartha.kumar@oracle.com
Link: https://lkml.kernel.org/r/20230113223057.173292-2-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f528260b
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -171,7 +171,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 						long freed);
-int isolate_hugetlb(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct folio *folio, struct list_head *list);
 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
 			bool *migratable_cleared);
@@ -413,7 +413,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 	return NULL;
 }
 
-static inline int isolate_hugetlb(struct page *page, struct list_head *list)
+static inline int isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
 	return -EBUSY;
 }
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1930,7 +1930,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			continue;
 
 		if (folio_test_hugetlb(folio)) {
-			isolate_hugetlb(&folio->page, movable_page_list);
+			isolate_hugetlb(folio, movable_page_list);
 			continue;
 		}
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2925,7 +2925,7 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 		 * Fail with -EBUSY if not possible.
 		 */
 		spin_unlock_irq(&hugetlb_lock);
-		ret = isolate_hugetlb(&old_folio->page, list);
+		ret = isolate_hugetlb(old_folio, list);
 		spin_lock_irq(&hugetlb_lock);
 		goto free_new;
 	} else if (!folio_test_hugetlb_freed(old_folio)) {
@@ -3000,7 +3000,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
 	if (hstate_is_gigantic(h))
 		return -ENOMEM;
 
-	if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list))
+	if (folio_ref_count(folio) && !isolate_hugetlb(folio, list))
 		ret = 0;
 	else if (!folio_ref_count(folio))
 		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
@@ -7250,19 +7250,19 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
  * These functions are overwritable if your architecture needs its own
  * behavior.
  */
-int isolate_hugetlb(struct page *page, struct list_head *list)
+int isolate_hugetlb(struct folio *folio, struct list_head *list)
 {
 	int ret = 0;
 
 	spin_lock_irq(&hugetlb_lock);
-	if (!PageHeadHuge(page) ||
-	    !HPageMigratable(page) ||
-	    !get_page_unless_zero(page)) {
+	if (!folio_test_hugetlb(folio) ||
+	    !folio_test_hugetlb_migratable(folio) ||
+	    !folio_try_get(folio)) {
 		ret = -EBUSY;
 		goto unlock;
 	}
-	ClearHPageMigratable(page);
-	list_move_tail(&page->lru, list);
+	folio_clear_hugetlb_migratable(folio);
+	list_move_tail(&folio->lru, list);
 unlock:
 	spin_unlock_irq(&hugetlb_lock);
 	return ret;
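
For readers following the conversion, the page-based helpers in the hunk
above map one-to-one onto folio equivalents (this only summarizes the
changes already shown; it introduces no new API):

	PageHeadHuge(page)          ->  folio_test_hugetlb(folio)
	HPageMigratable(page)       ->  folio_test_hugetlb_migratable(folio)
	get_page_unless_zero(page)  ->  folio_try_get(folio)
	ClearHPageMigratable(page)  ->  folio_clear_hugetlb_migratable(folio)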
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2508,7 +2508,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
 	bool isolated = false;
 
 	if (PageHuge(page)) {
-		isolated = !isolate_hugetlb(page, pagelist);
+		isolated = !isolate_hugetlb(page_folio(page), pagelist);
 	} else {
 		bool lru = !__PageMovable(page);
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1641,7 +1641,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 		if (PageHuge(page)) {
 			pfn = page_to_pfn(head) + compound_nr(head) - 1;
-			isolate_hugetlb(head, &source);
+			isolate_hugetlb(folio, &source);
 			continue;
 		} else if (PageTransHuge(page))
 			pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -602,7 +602,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 	if (flags & (MPOL_MF_MOVE_ALL) ||
 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
 	     !hugetlb_pmd_shared(pte))) {
-		if (isolate_hugetlb(page, qp->pagelist) &&
+		if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
 		    (flags & MPOL_MF_STRICT))
 			/*
 			 * Failed to isolate page but allow migrating pages
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1773,7 +1773,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 	if (PageHuge(page)) {
 		if (PageHead(page)) {
-			err = isolate_hugetlb(page, pagelist);
+			err = isolate_hugetlb(page_folio(page), pagelist);
 			if (!err)
 				err = 1;
 		}