Commit d5b43e96 authored by Matthew Wilcox (Oracle)'s avatar Matthew Wilcox (Oracle) Committed by Andrew Morton

hugetlb: convert remove_pool_huge_page() to remove_pool_hugetlb_folio()

Convert the callers to expect a folio and remove the unnecessary conversion
back to a struct page.

Link: https://lkml.kernel.org/r/20230824141325.2704553-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 04bbfd84
...@@ -1446,7 +1446,7 @@ static int hstate_next_node_to_alloc(struct hstate *h, ...@@ -1446,7 +1446,7 @@ static int hstate_next_node_to_alloc(struct hstate *h,
} }
/* /*
* helper for remove_pool_huge_page() - return the previously saved * helper for remove_pool_hugetlb_folio() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the * node ["this node"] from which to free a huge page. Advance the
* next node id whether or not we find a free huge page to free so * next node id whether or not we find a free huge page to free so
* that the next attempt to free addresses the next node. * that the next attempt to free addresses the next node.
...@@ -2222,9 +2222,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, ...@@ -2222,9 +2222,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
* an additional call to free the page to low level allocators. * an additional call to free the page to low level allocators.
* Called with hugetlb_lock locked. * Called with hugetlb_lock locked.
*/ */
static struct page *remove_pool_huge_page(struct hstate *h, static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
nodemask_t *nodes_allowed, nodemask_t *nodes_allowed, bool acct_surplus)
bool acct_surplus)
{ {
int nr_nodes, node; int nr_nodes, node;
struct folio *folio = NULL; struct folio *folio = NULL;
...@@ -2244,7 +2243,7 @@ static struct page *remove_pool_huge_page(struct hstate *h, ...@@ -2244,7 +2243,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
} }
} }
return &folio->page; return folio;
} }
/* /*
...@@ -2598,7 +2597,6 @@ static void return_unused_surplus_pages(struct hstate *h, ...@@ -2598,7 +2597,6 @@ static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages) unsigned long unused_resv_pages)
{ {
unsigned long nr_pages; unsigned long nr_pages;
struct page *page;
LIST_HEAD(page_list); LIST_HEAD(page_list);
lockdep_assert_held(&hugetlb_lock); lockdep_assert_held(&hugetlb_lock);
...@@ -2619,15 +2617,17 @@ static void return_unused_surplus_pages(struct hstate *h, ...@@ -2619,15 +2617,17 @@ static void return_unused_surplus_pages(struct hstate *h,
* evenly across all nodes with memory. Iterate across these nodes * evenly across all nodes with memory. Iterate across these nodes
* until we can no longer free unreserved surplus pages. This occurs * until we can no longer free unreserved surplus pages. This occurs
* when the nodes with surplus pages have no free pages. * when the nodes with surplus pages have no free pages.
* remove_pool_huge_page() will balance the freed pages across the * remove_pool_hugetlb_folio() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting. * on-line nodes with memory and will handle the hstate accounting.
*/ */
while (nr_pages--) { while (nr_pages--) {
page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); struct folio *folio;
if (!page)
folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
if (!folio)
goto out; goto out;
list_add(&page->lru, &page_list); list_add(&folio->lru, &page_list);
} }
out: out:
...@@ -3472,7 +3472,6 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, ...@@ -3472,7 +3472,6 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
nodemask_t *nodes_allowed) nodemask_t *nodes_allowed)
{ {
unsigned long min_count, ret; unsigned long min_count, ret;
struct page *page;
LIST_HEAD(page_list); LIST_HEAD(page_list);
NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
...@@ -3594,11 +3593,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, ...@@ -3594,11 +3593,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* Collect pages to be removed on list without dropping lock * Collect pages to be removed on list without dropping lock
*/ */
while (min_count < persistent_huge_pages(h)) { while (min_count < persistent_huge_pages(h)) {
page = remove_pool_huge_page(h, nodes_allowed, 0); struct folio *folio;
if (!page)
folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
if (!folio)
break; break;
list_add(&page->lru, &page_list); list_add(&folio->lru, &page_list);
} }
/* free the pages after dropping lock */ /* free the pages after dropping lock */
spin_unlock_irq(&hugetlb_lock); spin_unlock_irq(&hugetlb_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment