Commit 5691753d authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert huge_zero_page to huge_zero_folio

With all callers of is_huge_zero_page() converted, we can now switch the
huge_zero_page itself from being a compound page to a folio.
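
As an illustrative sketch only (nothing below is part of this patch),
a caller that used to test a bare struct page bridges to the new
folio-based helper via page_folio():

	/* before the conversion (helper removed by this patch) */
	if (is_huge_zero_page(page))
		return;

	/* after: test the folio directly */
	if (is_huge_zero_folio(page_folio(page)))
		return;

Callers that still need a struct page can go through the new
mm_get_huge_zero_page() inline wrapper, which simply returns the head
page of the huge zero folio, so they can be converted incrementally.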

Link: https://lkml.kernel.org/r/20240326202833.523759-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b002a7b0
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -348,17 +348,12 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
 
-extern struct page *huge_zero_page;
+extern struct folio *huge_zero_folio;
 extern unsigned long huge_zero_pfn;
 
-static inline bool is_huge_zero_page(const struct page *page)
-{
-	return READ_ONCE(huge_zero_page) == page;
-}
-
 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
-	return READ_ONCE(huge_zero_page) == &folio->page;
+	return READ_ONCE(huge_zero_folio) == folio;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
@@ -371,9 +366,14 @@ static inline bool is_huge_zero_pud(pud_t pud)
 	return false;
 }
 
-struct page *mm_get_huge_zero_page(struct mm_struct *mm);
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
 void mm_put_huge_zero_page(struct mm_struct *mm);
 
+static inline struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+{
+	return &mm_get_huge_zero_folio(mm)->page;
+}
+
 #define mk_huge_pmd(page, prot)  pmd_mkhuge(mk_pmd(page, prot))
 
 static inline bool thp_migration_supported(void)
@@ -485,11 +485,6 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	return 0;
 }
 
-static inline bool is_huge_zero_page(const struct page *page)
-{
-	return false;
-}
-
 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
 	return false;
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -74,7 +74,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 					 struct shrink_control *sc);
 
 static atomic_t huge_zero_refcount;
-struct page *huge_zero_page __read_mostly;
+struct folio *huge_zero_folio __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
 unsigned long huge_anon_orders_always __read_mostly;
 unsigned long huge_anon_orders_madvise __read_mostly;
@@ -192,24 +192,24 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 
 static bool get_huge_zero_page(void)
 {
-	struct page *zero_page;
+	struct folio *zero_folio;
 retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
 		return true;
 
-	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
-	if (!zero_page) {
+	if (!zero_folio) {
 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
 		return false;
 	}
 	preempt_disable();
-	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
 		preempt_enable();
-		__free_pages(zero_page, compound_order(zero_page));
+		folio_put(zero_folio);
 		goto retry;
 	}
-	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
+	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
 
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
@@ -227,10 +227,10 @@ static void put_huge_zero_page(void)
 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
 {
 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
-		return READ_ONCE(huge_zero_page);
+		return READ_ONCE(huge_zero_folio);
 
 	if (!get_huge_zero_page())
 		return NULL;
@@ -238,7 +238,7 @@ struct page *mm_get_huge_zero_page(struct mm_struct *mm)
 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
 		put_huge_zero_page();
 
-	return READ_ONCE(huge_zero_page);
+	return READ_ONCE(huge_zero_folio);
 }
 
 void mm_put_huge_zero_page(struct mm_struct *mm)
@@ -258,10 +258,10 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 				       struct shrink_control *sc)
 {
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
-		struct page *zero_page = xchg(&huge_zero_page, NULL);
-		BUG_ON(zero_page == NULL);
+		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
+		BUG_ON(zero_folio == NULL);
 		WRITE_ONCE(huge_zero_pfn, ~0UL);
-		__free_pages(zero_page, compound_order(zero_page));
+		folio_put(zero_folio);
 		return HPAGE_PMD_NR;
 	}
@@ -1340,7 +1340,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * since we already have a zero page to copy. It just takes a
 		 * reference.
 		 */
-		mm_get_huge_zero_page(dst_mm);
+		mm_get_huge_zero_folio(dst_mm);
 		goto out_zero_page;
 	}