Commit 6c141973 authored by Mike Kravetz, committed by Andrew Morton

hugetlb: clear flags in tail pages that will be freed individually

hugetlb manually creates and destroys compound pages.  As such, it makes
assumptions about struct page layout.  Commit ebc1baf5 ("mm: free up a
word in the first tail page") changed that layout and broke hugetlb.  The
following fixes the breakage.
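
The core of the problem: the buddy allocator's free-time sanity checks
report a "bad page" whenever a flag in PAGE_FLAGS_CHECK_AT_FREE is still
set, and hugetlb frees the tail pages of gigantic folios individually.  A
minimal sketch of the scrubbing each such tail page needs (the helper
name is hypothetical, for illustration only; the real change is the first
hunk below):

	/*
	 * Hypothetical helper, illustration only -- not the kernel's
	 * code.  A tail page about to be freed on its own must look
	 * like an ordinary order-0 page: no check-at-free flags, no
	 * mapping, no compound-head marker.
	 */
	static void prepare_tail_page_for_free(struct page *p)
	{
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;	/* the fix */
		p->mapping = NULL;
		clear_compound_head(p);
	}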

Link: https://lkml.kernel.org/r/20230822231741.GC4509@monkey
Fixes: ebc1baf5 ("mm: free up a word in the first tail page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fcbc329f
mm/hugetlb.c
@@ -1484,6 +1484,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
 
 	for (i = 1; i < nr_pages; i++) {
 		p = folio_page(folio, i);
+		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
@@ -1702,8 +1703,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 static void __update_and_free_hugetlb_folio(struct hstate *h,
 					struct folio *folio)
 {
-	int i;
-	struct page *subpage;
 	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1745,14 +1744,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 		spin_unlock_irq(&hugetlb_lock);
 	}
 
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		subpage = folio_page(folio, i);
-		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
-				1 << PG_referenced | 1 << PG_dirty |
-				1 << PG_active | 1 << PG_private |
-				1 << PG_writeback);
-	}
-
 	/*
 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
 	 * need to be given back to CMA in free_gigantic_folio.
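
Taken together, the hunks move the flag scrubbing to the only path that
hands pages back one at a time.  A simplified sketch of that distinction;
the helper names and bodies below are assumptions for illustration, not
the kernel's implementation:

	/* Illustration only -- hypothetical helpers, not kernel code. */

	/*
	 * Gigantic folios are dismantled by hand and their pages are
	 * returned to the allocator individually, so hugetlb must
	 * scrub each page itself (the first hunk above).
	 */
	static void sketch_free_gigantic(struct page *base, unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++) {
			struct page *p = base + i;

			p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
			__free_page(p);	/* now passes free-time checks */
		}
	}

	/*
	 * Ordinary hugetlb folios are freed as a single compound
	 * allocation; the core allocator tears down the tail pages
	 * itself, which is why the old per-subpage loop in
	 * __update_and_free_hugetlb_folio() could go away.
	 */
	static void sketch_free_regular(struct page *head, unsigned int order)
	{
		__free_pages(head, order);
	}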