Commit 959a78b6 authored by ZhangPeng, committed by Andrew Morton

mm/hugetlb: use a folio in hugetlb_wp()

We can replace nine implicit calls to compound_head() with one by using
old_folio.  The page we get back is always a head page, so we just convert
old_page to old_folio.

Link: https://lkml.kernel.org/r/20230606062013.2947002-3-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ad27ce20
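
To make the diff below easier to follow, here is a condensed, hand-written sketch of the before/after pattern it applies (this is not the actual hugetlb_wp() body; the elided comments stand in for the unchanged copy, rmap and PTE logic). The point is that page-based helpers such as page_mapcount(), PageAnon(), get_page() and put_page() each re-derive the head page/folio internally, whereas page_folio() does that conversion once and the folio_*() helpers then use the result directly.

/* Before: every page_*() helper implicitly goes through compound_head(old_page). */
struct page *old_page = pte_page(pte);
if (page_mapcount(old_page) == 1 && PageAnon(old_page))
	page_move_anon_rmap(old_page, vma);
get_page(old_page);
/* ... copy, rmap and pte updates elided ... */
put_page(old_page);

/* After: convert once with page_folio(), then use folio-native helpers. */
struct folio *old_folio = page_folio(pte_page(pte));
if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio))
	page_move_anon_rmap(&old_folio->page, vma);
folio_get(old_folio);
/* ... copy, rmap and pte updates elided ... */
folio_put(old_folio);

Because a hugetlb PTE always maps a head page, page_folio() here is essentially a type-safe cast; the gain is dropping the redundant compound_head() lookups that the page-based helpers would otherwise perform. The diff below makes this conversion throughout hugetlb_wp().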
@@ -5540,7 +5540,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
 	pte_t pte = huge_ptep_get(ptep);
 	struct hstate *h = hstate_vma(vma);
-	struct page *old_page;
+	struct folio *old_folio;
 	struct folio *new_folio;
 	int outside_reserve = 0;
 	vm_fault_t ret = 0;
@@ -5571,7 +5571,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		return 0;
 	}
 
-	old_page = pte_page(pte);
+	old_folio = page_folio(pte_page(pte));
 
 	delayacct_wpcopy_start();
@@ -5580,17 +5580,17 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * If no-one else is actually using this page, we're the exclusive
 	 * owner and can reuse this page.
 	 */
-	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
-		if (!PageAnonExclusive(old_page))
-			page_move_anon_rmap(old_page, vma);
+	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
+		if (!PageAnonExclusive(&old_folio->page))
+			page_move_anon_rmap(&old_folio->page, vma);
 		if (likely(!unshare))
 			set_huge_ptep_writable(vma, haddr, ptep);
 
 		delayacct_wpcopy_end();
 		return 0;
 	}
-	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
-		       old_page);
+	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
+		       PageAnonExclusive(&old_folio->page), &old_folio->page);
 
 	/*
 	 * If the process that created a MAP_PRIVATE mapping is about to
@@ -5602,10 +5602,10 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * of the full address range.
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
-			page_folio(old_page) != pagecache_folio)
+			old_folio != pagecache_folio)
 		outside_reserve = 1;
 
-	get_page(old_page);
+	folio_get(old_folio);
 
 	/*
 	 * Drop page table lock as buddy allocator may be called. It will
@@ -5627,7 +5627,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		pgoff_t idx;
 		u32 hash;
 
-		put_page(old_page);
+		folio_put(old_folio);
 		/*
 		 * Drop hugetlb_fault_mutex and vma_lock before
 		 * unmapping.  unmapping needs to hold vma_lock
@@ -5642,7 +5642,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		hugetlb_vma_unlock_read(vma);
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-		unmap_ref_private(mm, vma, old_page, haddr);
+		unmap_ref_private(mm, vma, &old_folio->page, haddr);
 
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		hugetlb_vma_lock_read(vma);
@@ -5672,7 +5672,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}
 
-	if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) {
+	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
 		goto out_release_all;
 	}
@@ -5694,14 +5694,14 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		/* Break COW or unshare */
 		huge_ptep_clear_flush(vma, haddr, ptep);
 		mmu_notifier_invalidate_range(mm, range.start, range.end);
-		page_remove_rmap(old_page, vma, true);
+		page_remove_rmap(&old_folio->page, vma, true);
 		hugepage_add_new_anon_rmap(new_folio, vma, haddr);
 		if (huge_pte_uffd_wp(pte))
 			newpte = huge_pte_mkuffd_wp(newpte);
 		set_huge_pte_at(mm, haddr, ptep, newpte);
 		folio_set_hugetlb_migratable(new_folio);
 		/* Make the old page be freed below */
-		new_folio = page_folio(old_page);
+		new_folio = old_folio;
 	}
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(&range);
@@ -5710,11 +5710,11 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * No restore in case of successful pagetable update (Break COW or
 	 * unshare)
 	 */
-	if (new_folio != page_folio(old_page))
+	if (new_folio != old_folio)
 		restore_reserve_on_error(h, vma, haddr, new_folio);
 	folio_put(new_folio);
 out_release_old:
-	put_page(old_page);
+	folio_put(old_folio);
 
 	spin_lock(ptl); /* Caller expects lock to be held */