Commit d7c0e5f7 authored by David Hildenbrand, committed by Andrew Morton

mm/memory: ignore writable bit in folio_pte_batch()

...  and conditionally return to the caller if any PTE except the first
one is writable.  fork() has to make sure to properly write-protect in
case any PTE is writable.  Other users (e.g., page unmapping) are expected
to not care.
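
For illustration, a minimal user-space sketch of the batching idea (not the
kernel implementation; PTE_WRITE, PTE_ACCESSED, clear_ignored(), next_pfn()
and pte_batch() below are simplified stand-ins for the kernel's pte_t
machinery): compare each entry against an "expected" value with the ignored
bits cleared, while separately reporting whether any entry other than the
first was writable.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PTE_WRITE	(1u << 1)	/* hypothetical writable bit */
	#define PTE_ACCESSED	(1u << 2)	/* hypothetical accessed bit */
	#define PFN_SHIFT	12

	typedef uint64_t pte_t;

	/* Drop the bits a batch is allowed to differ in. */
	static pte_t clear_ignored(pte_t pte)
	{
		return pte & ~(pte_t)(PTE_WRITE | PTE_ACCESSED);
	}

	static pte_t next_pfn(pte_t pte)
	{
		return pte + ((pte_t)1 << PFN_SHIFT);
	}

	/*
	 * Count consecutive entries mapping consecutive PFNs whose
	 * non-ignored bits match the first entry.
	 */
	static int pte_batch(const pte_t *start, int max_nr, bool *any_writable)
	{
		pte_t expected = clear_ignored(next_pfn(start[0]));
		int nr;

		if (any_writable)
			*any_writable = false;

		for (nr = 1; nr < max_nr; nr++) {
			pte_t pte = start[nr];
			bool writable = pte & PTE_WRITE;

			if (clear_ignored(pte) != expected)
				break;
			if (any_writable)
				*any_writable |= writable;
			expected = next_pfn(expected);
		}
		return nr;
	}

	int main(void)
	{
		/* Three consecutive pages; only the middle one is writable. */
		pte_t ptes[3] = {
			(pte_t)1 << PFN_SHIFT,
			((pte_t)2 << PFN_SHIFT) | PTE_WRITE,
			(pte_t)3 << PFN_SHIFT,
		};
		bool any_writable;
		int nr = pte_batch(ptes, 3, &any_writable);

		printf("batched %d ptes, any_writable=%d\n", nr, any_writable);
		return 0;
	}

All three entries batch because the writable bit is ignored in the
comparison, but any_writable comes back true, so a caller like fork()
knows it must write-protect the copied range.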

Link: https://lkml.kernel.org/r/20240129124649.189745-16-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Russell King (Oracle) <linux@armlinux.org.uk>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 25365e10
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -968,7 +968,7 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
 		pte = pte_mkclean(pte);
 	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
 		pte = pte_clear_soft_dirty(pte);
-	return pte_mkold(pte);
+	return pte_wrprotect(pte_mkold(pte));
 }
 
 /*
@@ -976,21 +976,32 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
  * pages of the same folio.
  *
  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
- * the accessed bit, dirty bit (with FPB_IGNORE_DIRTY) and soft-dirty bit
- * (with FPB_IGNORE_SOFT_DIRTY).
+ * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
+ * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
+ *
+ * If "any_writable" is set, it will indicate if any other PTE besides the
+ * first (given) PTE is writable.
  */
 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
-		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags)
+		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
+		bool *any_writable)
 {
 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
 	const pte_t *end_ptep = start_ptep + max_nr;
 	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags);
 	pte_t *ptep = start_ptep + 1;
+	bool writable;
+
+	if (any_writable)
+		*any_writable = false;
 
 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
 
 	while (ptep != end_ptep) {
-		pte = __pte_batch_clear_ignored(ptep_get(ptep), flags);
+		pte = ptep_get(ptep);
+		if (any_writable)
+			writable = !!pte_write(pte);
+		pte = __pte_batch_clear_ignored(pte, flags);
 
 		if (!pte_same(pte, expected_pte))
 			break;
@@ -1003,6 +1014,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		if (pte_pfn(pte) == folio_end_pfn)
 			break;
 
+		if (any_writable)
+			*any_writable |= writable;
+
 		expected_pte = pte_next_pfn(expected_pte);
 		ptep++;
 	}
@@ -1024,6 +1038,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 {
 	struct page *page;
 	struct folio *folio;
+	bool any_writable;
 	fpb_t flags = 0;
 	int err, nr;
@@ -1044,7 +1059,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	if (!vma_soft_dirty_enabled(src_vma))
 		flags |= FPB_IGNORE_SOFT_DIRTY;
 
-	nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags);
+	nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
+			     &any_writable);
 	folio_ref_add(folio, nr);
 	if (folio_test_anon(folio)) {
 		if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
@@ -1058,6 +1074,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 		folio_dup_file_rmap_ptes(folio, page, nr);
 		rss[mm_counter_file(folio)] += nr;
 	}
+	if (any_writable)
+		pte = pte_mkwrite(pte, src_vma);
 	__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
 			    addr, nr);
 	return nr;