Commit 28d41a48 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert wp_page_copy() to use folios

Use new_folio instead of new_page throughout, because we allocated it
and know it's an order-0 folio.  Most old_page uses become old_folio,
but use vmf->page where we need the precise page.
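
For readers new to folios, a minimal sketch of this conversion pattern may help (illustrative only, not part of the patch; sketch_old_folio is a hypothetical name, but the folio helpers are the ones the patch calls):

#include <linux/mm.h>

/* Illustrative only: the shape of the page -> folio conversion. */
static void sketch_old_folio(struct vm_fault *vmf, struct mm_struct *mm)
{
	struct folio *old_folio = NULL;

	/* Derive the folio once from the precise page that faulted. */
	if (vmf->page)
		old_folio = page_folio(vmf->page);

	if (old_folio) {
		/* PageAnon(page) becomes folio_test_anon(folio)... */
		if (!folio_test_anon(old_folio))
			dec_mm_counter(mm, mm_counter_file(&old_folio->page));
		/* ...and put_page(page) becomes folio_put(folio). */
		folio_put(old_folio);
	}
}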

Link: https://lkml.kernel.org/r/20230116191813.2145215-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cb3184de
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3043,8 +3043,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
 	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
-	struct page *old_page = vmf->page;
-	struct page *new_page = NULL;
+	struct folio *old_folio = NULL;
+	struct folio *new_folio = NULL;
 	pte_t entry;
 	int page_copied = 0;
 	struct mmu_notifier_range range;
@@ -3052,23 +3052,22 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
 	delayacct_wpcopy_start();
 
+	if (vmf->page)
+		old_folio = page_folio(vmf->page);
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 
 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		struct folio *new_folio;
-
 		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 		if (!new_folio)
 			goto oom;
-		new_page = &new_folio->page;
 	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-				vmf->address);
-		if (!new_page)
+		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+				vmf->address, false);
+		if (!new_folio)
 			goto oom;
 
-		ret = __wp_page_copy_user(new_page, old_page, vmf);
+		ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
 		if (ret) {
 			/*
 			 * COW failed, if the fault was solved by other,
@@ -3077,21 +3076,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			 * from the second attempt.
 			 * The -EHWPOISON case will not be retried.
 			 */
-			put_page(new_page);
-			if (old_page)
-				put_page(old_page);
+			folio_put(new_folio);
+			if (old_folio)
+				folio_put(old_folio);
 
 			delayacct_wpcopy_end();
 			return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
 		}
-		kmsan_copy_page_meta(new_page, old_page);
+		kmsan_copy_page_meta(&new_folio->page, vmf->page);
 	}
 
-	if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
+	if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
 		goto oom_free_new;
-	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-	__SetPageUptodate(new_page);
+	__folio_mark_uptodate(new_folio);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 				vmf->address & PAGE_MASK,
@@ -3103,16 +3102,16 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	 */
 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
-		if (old_page) {
-			if (!PageAnon(old_page)) {
-				dec_mm_counter(mm, mm_counter_file(old_page));
+		if (old_folio) {
+			if (!folio_test_anon(old_folio)) {
+				dec_mm_counter(mm, mm_counter_file(&old_folio->page));
 				inc_mm_counter(mm, MM_ANONPAGES);
 			}
 		} else {
 			inc_mm_counter(mm, MM_ANONPAGES);
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
-		entry = mk_pte(new_page, vma->vm_page_prot);
+		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
 		entry = pte_sw_mkyoung(entry);
 		if (unlikely(unshare)) {
 			if (pte_soft_dirty(vmf->orig_pte))
@@ -3131,8 +3130,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		 * some TLBs while the old PTE remains in others.
 		 */
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
-		page_add_new_anon_rmap(new_page, vma, vmf->address);
-		lru_cache_add_inactive_or_unevictable(new_page, vma);
+		folio_add_new_anon_rmap(new_folio, vma, vmf->address);
+		folio_add_lru_vma(new_folio, vma);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3141,7 +3140,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		BUG_ON(unshare && pte_write(entry));
 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
 		update_mmu_cache(vma, vmf->address, vmf->pte);
-		if (old_page) {
+		if (old_folio) {
 			/*
 			 * Only after switching the pte to the new page may
 			 * we remove the mapcount here. Otherwise another
@@ -3164,18 +3163,18 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			 * mapcount is visible. So transitively, TLBs to
 			 * old page will be flushed before it can be reused.
 			 */
-			page_remove_rmap(old_page, vma, false);
+			page_remove_rmap(vmf->page, vma, false);
 		}
 
 		/* Free the old page.. */
-		new_page = old_page;
+		new_folio = old_folio;
 		page_copied = 1;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
 	}
 
-	if (new_page)
-		put_page(new_page);
+	if (new_folio)
+		folio_put(new_folio);
 
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	/*
@@ -3183,19 +3182,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	 * the above ptep_clear_flush_notify() did already call it.
 	 */
 	mmu_notifier_invalidate_range_only_end(&range);
-	if (old_page) {
+	if (old_folio) {
 		if (page_copied)
-			free_swap_cache(old_page);
-		put_page(old_page);
+			free_swap_cache(&old_folio->page);
+		folio_put(old_folio);
 	}
 
 	delayacct_wpcopy_end();
 	return 0;
 oom_free_new:
-	put_page(new_page);
+	folio_put(new_folio);
 oom:
-	if (old_page)
-		put_page(old_page);
+	if (old_folio)
+		folio_put(old_folio);
 
 	delayacct_wpcopy_end();
 	return VM_FAULT_OOM;
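
A closing usage note: until every callee is folio-aware, the patch reaches back to the precise page with &folio->page (see kmsan_copy_page_meta() and cgroup_throttle_swaprate() above). A minimal sketch of that escape hatch, assuming the folio API of this kernel version (sketch_alloc is a hypothetical name, not from the commit):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Illustrative only: an order-0 folio fed into not-yet-converted page APIs. */
static vm_fault_t sketch_alloc(struct vm_area_struct *vma, unsigned long addr)
{
	/* Order 0: exactly one page, so &folio->page is the whole allocation. */
	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
					      addr, false);
	if (!folio)
		return VM_FAULT_OOM;

	/* Page-based helpers that are not folio-aware yet take &folio->page. */
	cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
	__folio_mark_uptodate(folio);

	folio_put(folio);
	return 0;
}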