Commit d8f5f7e4 authored by Mike Kravetz, committed by Andrew Morton

hugetlb: set hugetlb page flag before optimizing vmemmap

Currently, vmemmap optimization of hugetlb pages is performed before the
hugetlb flag (previously the hugetlb destructor) is set, identifying the
folio as a hugetlb folio.  This means there is a window of time during which
a seemingly ordinary folio does not have all of its associated vmemmap
present.  The core mm only expects vmemmap to be potentially optimized for
hugetlb and device dax pages.  This can cause problems in code such as
memory error handling, which may want to write to tail struct pages.

There is only one call to perform hugetlb vmemmap optimization today.  To
fix this issue, simply set the hugetlb flag before that call.
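
For illustration, a condensed sketch of the ordering change (not the actual
kernel code; the two helpers are real, but the surrounding flow and comments
are not):

	/* Before: flag set after optimization */
	hugetlb_vmemmap_optimize(h, &folio->page);	/* tail struct pages freed */
	/* window: folio_test_hugetlb() is still false, yet vmemmap is gone */
	folio_set_hugetlb(folio);

	/* After: flag set first, so the folio is identifiable as hugetlb
	 * for the entire time its vmemmap may be optimized away */
	folio_set_hugetlb(folio);
	hugetlb_vmemmap_optimize(h, &folio->page);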

There was a similar issue in the hugetlb free path that was previously
addressed.  The two routines that optimize or restore hugetlb vmemmap should
only be passed hugetlb folios/pages.  To catch any callers not following
this rule, add VM_WARN_ON calls to the routines.  In the hugetlb free code
paths, some calls could be made to restore vmemmap after clearing the
hugetlb flag.  This was 'safe' because in those cases the vmemmap was
already present and the call was a no-op.  However, for consistency these
calls were eliminated so that the VM_WARN_ON checks can be added.
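
As a hypothetical caller-side example of the rule these checks enforce
(sketch only; folio_clear_hugetlb() appears here purely for illustration):

	/* Wrong: flag cleared first, so the restore call now warns */
	folio_clear_hugetlb(folio);
	hugetlb_vmemmap_restore(h, &folio->page);	/* VM_WARN_ON_ONCE(!PageHuge()) fires */

	/* Right: restore vmemmap while the folio is still marked hugetlb,
	 * then clear the flag */
	hugetlb_vmemmap_restore(h, &folio->page);
	folio_clear_hugetlb(folio);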

Link: https://lkml.kernel.org/r/20230829213734.69673-1-mike.kravetz@oracle.com
Fixes: f41f2ed4 ("mm: hugetlb: free the vmemmap pages associated with each HugeTLB page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Usama Arif <usama.arif@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent dd34d9fe
mm/hugetlb.c

@@ -1720,7 +1720,12 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
 		return;

-	if (hugetlb_vmemmap_restore(h, &folio->page)) {
+	/*
+	 * If folio is not vmemmap optimized (!clear_dtor), then the folio
+	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore
+	 * can only be passed hugetlb pages and will BUG otherwise.
+	 */
+	if (clear_dtor && hugetlb_vmemmap_restore(h, &folio->page)) {
 		spin_lock_irq(&hugetlb_lock);
 		/*
 		 * If we cannot allocate vmemmap pages, just refuse to free the

@@ -1930,9 +1935,9 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)

 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
+	folio_set_hugetlb(folio);
 	hugetlb_vmemmap_optimize(h, &folio->page);
 	INIT_LIST_HEAD(&folio->lru);
-	folio_set_hugetlb(folio);
 	hugetlb_set_folio_subpool(folio, NULL);
 	set_hugetlb_cgroup(folio, NULL);
 	set_hugetlb_cgroup_rsvd(folio, NULL);

@@ -3580,13 +3585,21 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
 	remove_hugetlb_folio_for_demote(h, folio, false);
 	spin_unlock_irq(&hugetlb_lock);

-	rc = hugetlb_vmemmap_restore(h, &folio->page);
-	if (rc) {
-		/* Allocation of vmemmmap failed, we can not demote folio */
-		spin_lock_irq(&hugetlb_lock);
-		folio_ref_unfreeze(folio, 1);
-		add_hugetlb_folio(h, folio, false);
-		return rc;
+	/*
+	 * If vmemmap already existed for folio, the remove routine above would
+	 * have cleared the hugetlb folio flag.  Hence the folio is technically
+	 * no longer a hugetlb folio.  hugetlb_vmemmap_restore can only be
+	 * passed hugetlb folios and will BUG otherwise.
+	 */
+	if (folio_test_hugetlb(folio)) {
+		rc = hugetlb_vmemmap_restore(h, &folio->page);
+		if (rc) {
+			/* Allocation of vmemmmap failed, we can not demote folio */
+			spin_lock_irq(&hugetlb_lock);
+			folio_ref_unfreeze(folio, 1);
+			add_hugetlb_folio(h, folio, false);
+			return rc;
+		}
 	}

 	/*
mm/hugetlb_vmemmap.c

@@ -13,6 +13,7 @@
 #include <linux/pgtable.h>
 #include <linux/moduleparam.h>
 #include <linux/bootmem_info.h>
+#include <linux/mmdebug.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include "hugetlb_vmemmap.h"

@@ -456,6 +457,7 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
 	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
 	unsigned long vmemmap_reuse;

+	VM_WARN_ON_ONCE(!PageHuge(head));
 	if (!HPageVmemmapOptimized(head))
 		return 0;

@@ -550,6 +552,7 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
 	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
 	unsigned long vmemmap_reuse;

+	VM_WARN_ON_ONCE(!PageHuge(head));
 	if (!vmemmap_should_optimize(h, head))
 		return;
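
A note on the new checks: VM_WARN_ON_ONCE() comes from <linux/mmdebug.h>
(hence the new include) and only emits a runtime warning when CONFIG_DEBUG_VM
is enabled; otherwise it compiles down to a compile-time type check with no
runtime cost.  Paraphrased from the header (not a verbatim copy):

	#ifdef CONFIG_DEBUG_VM
	#define VM_WARN_ON_ONCE(cond)	(void)WARN_ON_ONCE(cond)
	#else
	#define VM_WARN_ON_ONCE(cond)	BUILD_BUG_ON_INVALID(cond)
	#endif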