Commit f8d93776 authored by David Hildenbrand, committed by Andrew Morton

mm/memory: optimize fork() with PTE-mapped THP

Let's implement PTE batching when consecutive (present) PTEs map
consecutive pages of the same large folio, and all other PTE bits besides
the PFNs are equal.

We will optimize folio_pte_batch() separately, to ignore selected PTE
bits.  This patch is based on work by Ryan Roberts.
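
For intuition, the batching criterion can be modelled outside the kernel: a
batch is a run of present entries whose frame numbers are consecutive, whose
remaining bits are identical, and which never runs past the end of the folio.
The following is a minimal userspace sketch only; struct toy_pte and
toy_pte_batch() are made-up names that deliberately ignore kernel details such
as pte_t accessors:

#include <stdbool.h>
#include <stdio.h>

/* Toy "PTE": a page frame number plus all remaining bits, lumped together. */
struct toy_pte {
        unsigned long pfn;
        unsigned long bits;     /* everything except the PFN */
        bool present;
};

/*
 * Count how many entries starting at ptes[0] form a batch: present,
 * identical non-PFN bits, consecutive PFNs, and still inside the folio
 * that starts at folio_start_pfn and spans folio_nr pages.
 */
static int toy_pte_batch(const struct toy_pte *ptes, int max_nr,
                         unsigned long folio_start_pfn, int folio_nr)
{
        unsigned long folio_end_pfn = folio_start_pfn + folio_nr;
        int nr = 1;     /* ptes[0] is known to be present and in the folio */

        while (nr < max_nr) {
                const struct toy_pte *p = &ptes[nr];

                /* Must be present, contiguous, and equal except for the PFN. */
                if (!p->present || p->bits != ptes[0].bits ||
                    p->pfn != ptes[0].pfn + nr)
                        break;
                /* Stop once the run would leave the folio. */
                if (p->pfn >= folio_end_pfn)
                        break;
                nr++;
        }
        return nr;
}

int main(void)
{
        /* A 4-page folio mapped at PFNs 100..103 with identical bits. */
        struct toy_pte ptes[] = {
                { .pfn = 100, .bits = 0x5, .present = true },
                { .pfn = 101, .bits = 0x5, .present = true },
                { .pfn = 102, .bits = 0x5, .present = true },
                { .pfn = 103, .bits = 0x5, .present = true },
                { .pfn = 900, .bits = 0x5, .present = true },   /* unrelated page */
        };

        printf("batch length: %d\n", toy_pte_batch(ptes, 5, 100, 4));   /* prints 4 */
        return 0;
}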

Use __always_inline for __copy_present_ptes() and keep the handling for
single PTEs completely separate from the multi-PTE case: we really want
the compiler to optimize for the single-PTE case with small folios, to not
degrade performance.

Note that PTE batching will never exceed a single page table and will
always stay within VMA boundaries.
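
As a hedged illustration of that bound (again a userspace toy, not the patch
itself): the caller only ever hands the batch helper the number of entries left
in the range it is currently copying, and then advances by however many entries
were processed, so a batch can never step past the end of the page table or the
range being copied.

#include <stdio.h>

/* Stand-in for batch detection; pretend every remaining entry batches. */
static int toy_batch_len(int max_nr)
{
        return max_nr;
}

int main(void)
{
        const int nr_entries = 512;     /* one page table's worth of PTEs */
        int i = 0;

        while (i < nr_entries) {
                int max_nr = nr_entries - i;    /* never look past the end */
                int nr = toy_batch_len(max_nr);

                printf("copy %d entries starting at index %d\n", nr, i);
                i += nr;        /* advance by the whole batch */
        }
        return 0;
}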

Further, processing PTE-mapped THP that may be pinned and have
PageAnonExclusive set on at least one subpage should work as expected, but
there is room for improvement: we will repeatedly (1) detect a PTE batch,
(2) detect that we have to copy a page, and (3) fall back and allocate a
single page to copy a single page.  For now we won't care, as pinned pages
are a corner case; we should rather look into maintaining only a single
PageAnonExclusive bit for large folios.

Link: https://lkml.kernel.org/r/20240129124649.189745-14-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Russell King (Oracle) <linux@armlinux.org.uk>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 53723298
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -650,6 +650,37 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address
 }
 #endif
 
+#ifndef wrprotect_ptes
+/**
+ * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
+ *                  folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to write-protect.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_set_wrprotect().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock.  The PTEs map consecutive
+ * pages that belong to the same folio.  The PTEs are all in the same PMD.
+ */
+static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
+                pte_t *ptep, unsigned int nr)
+{
+        for (;;) {
+                ptep_set_wrprotect(mm, addr, ptep);
+                if (--nr == 0)
+                        break;
+                ptep++;
+                addr += PAGE_SIZE;
+        }
+}
+#endif
+
 /*
  * On some architectures hardware does not set page access bit when accessing
  * memory page, it is responsibility of software setting this bit. It brings
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -930,15 +930,15 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
         return 0;
 }
 
-static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
+static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
                 struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
-                pte_t pte, unsigned long addr)
+                pte_t pte, unsigned long addr, int nr)
 {
         struct mm_struct *src_mm = src_vma->vm_mm;
 
         /* If it's a COW mapping, write protect it both processes. */
         if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
-                ptep_set_wrprotect(src_mm, addr, src_pte);
+                wrprotect_ptes(src_mm, addr, src_pte, nr);
                 pte = pte_wrprotect(pte);
         }
 
@@ -950,26 +950,93 @@ static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
         if (!userfaultfd_wp(dst_vma))
                 pte = pte_clear_uffd_wp(pte);
 
-        set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+        set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
+}
+
+/*
+ * Detect a PTE batch: consecutive (present) PTEs that map consecutive
+ * pages of the same folio.
+ *
+ * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
+ */
+static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+                pte_t *start_ptep, pte_t pte, int max_nr)
+{
+        unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+        const pte_t *end_ptep = start_ptep + max_nr;
+        pte_t expected_pte = pte_next_pfn(pte);
+        pte_t *ptep = start_ptep + 1;
+
+        VM_WARN_ON_FOLIO(!pte_present(pte), folio);
+
+        while (ptep != end_ptep) {
+                pte = ptep_get(ptep);
+
+                if (!pte_same(pte, expected_pte))
+                        break;
+
+                /*
+                 * Stop immediately once we reached the end of the folio. In
+                 * corner cases the next PFN might fall into a different
+                 * folio.
+                 */
+                if (pte_pfn(pte) == folio_end_pfn)
+                        break;
+
+                expected_pte = pte_next_pfn(expected_pte);
+                ptep++;
+        }
+
+        return ptep - start_ptep;
 }
 
 /*
- * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
- * is required to copy this pte.
+ * Copy one present PTE, trying to batch-process subsequent PTEs that map
+ * consecutive pages of the same folio by copying them as well.
+ *
+ * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
+ * Otherwise, returns the number of copied PTEs (at least 1).
  */
 static inline int
-copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
-                 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
-                 int *rss, struct folio **prealloc)
+copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+                 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
+                 int max_nr, int *rss, struct folio **prealloc)
 {
         struct page *page;
         struct folio *folio;
+        int err, nr;
 
         page = vm_normal_page(src_vma, addr, pte);
         if (unlikely(!page))
                 goto copy_pte;
 
         folio = page_folio(page);
+
+        /*
+         * If we likely have to copy, just don't bother with batching. Make
+         * sure that the common "small folio" case is as fast as possible
+         * by keeping the batching logic separate.
+         */
+        if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
+                nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr);
+                folio_ref_add(folio, nr);
+                if (folio_test_anon(folio)) {
+                        if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
+                                                                  nr, src_vma))) {
+                                folio_ref_sub(folio, nr);
+                                return -EAGAIN;
+                        }
+                        rss[MM_ANONPAGES] += nr;
+                        VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
+                } else {
+                        folio_dup_file_rmap_ptes(folio, page, nr);
+                        rss[mm_counter_file(folio)] += nr;
+                }
+                __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
+                                    addr, nr);
+                return nr;
+        }
+
         folio_get(folio);
         if (folio_test_anon(folio)) {
                 /*
@@ -981,8 +1048,9 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
                         /* Page may be pinned, we have to copy. */
                         folio_put(folio);
-                        return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
-                                                 addr, rss, prealloc, page);
+                        err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
+                                                addr, rss, prealloc, page);
+                        return err ? err : 1;
                 }
                 rss[MM_ANONPAGES]++;
                 VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
@@ -992,8 +1060,8 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
         }
 
 copy_pte:
-        __copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr);
-        return 0;
+        __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1);
+        return 1;
 }
 
 static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
@@ -1030,10 +1098,11 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
         pte_t *src_pte, *dst_pte;
         pte_t ptent;
         spinlock_t *src_ptl, *dst_ptl;
-        int progress, ret = 0;
+        int progress, max_nr, ret = 0;
         int rss[NR_MM_COUNTERS];
         swp_entry_t entry = (swp_entry_t){0};
         struct folio *prealloc = NULL;
+        int nr;
 
 again:
         progress = 0;
@@ -1064,6 +1133,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
         arch_enter_lazy_mmu_mode();
 
         do {
+                nr = 1;
+
                 /*
                  * We are holding two locks at this point - either of them
                  * could generate latencies in another task on another CPU.
@@ -1102,9 +1173,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                          */
                         WARN_ON_ONCE(ret != -ENOENT);
                 }
-                /* copy_present_pte() will clear `*prealloc' if consumed */
-                ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
-                                       ptent, addr, rss, &prealloc);
+                /* copy_present_ptes() will clear `*prealloc' if consumed */
+                max_nr = (end - addr) / PAGE_SIZE;
+                ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
+                                        ptent, addr, max_nr, rss, &prealloc);
                 /*
                  * If we need a pre-allocated page for this pte, drop the
                  * locks, allocate, and try again.
@@ -1121,8 +1193,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                         folio_put(prealloc);
                         prealloc = NULL;
                 }
-                progress += 8;
-        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+                nr = ret;
+                progress += 8 * nr;
+        } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
+                 addr != end);
 
         arch_leave_lazy_mmu_mode();
         pte_unmap_unlock(orig_src_pte, src_ptl);
@@ -1143,7 +1217,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 prealloc = folio_prealloc(src_mm, src_vma, addr, false);
                 if (!prealloc)
                         return -ENOMEM;
-        } else if (ret) {
+        } else if (ret < 0) {
                 VM_WARN_ON_ONCE(1);
         }