Commit 98e51a22 authored by Suren Baghdasaryan, committed by Andrew Morton

mm: conditionally write-lock VMA in free_pgtables

Normally free_pgtables needs to lock affected VMAs except for the case
when VMAs were isolated under VMA write-lock.  munmap() does just that,
isolating while holding appropriate locks and then downgrading mmap_lock
and dropping per-VMA locks before freeing page tables.  Add a parameter to
free_pgtables for such a scenario.

Link: https://lkml.kernel.org/r/20230227173632.3292573-20-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 73046fd0
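The locking convention the patch introduces can be illustrated outside the kernel: the teardown helper takes each per-object lock itself only when the caller says it still holds the coarse write lock, and skips that step when the objects were already isolated under their own write locks before the coarse lock was downgraded. The snippet below is a self-contained userspace analogue using pthreads, not kernel code; the names (struct area, free_areas, unlink_area) are illustrative only.

/*
 * Userspace analogue (illustrative only, not kernel code) of the
 * convention free_pgtables() gains in this patch: lock each area here
 * only when the caller still holds the coarse mmap-style write lock;
 * otherwise trust that the areas were already isolated under their own
 * write locks before that lock was downgraded.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct area {
	pthread_mutex_t lock;	/* stand-in for the per-VMA lock */
	int id;
};

static void unlink_area(struct area *a)
{
	printf("tearing down area %d\n", a->id);
}

static void free_areas(struct area *areas, int n, bool mm_wr_locked)
{
	for (int i = 0; i < n; i++) {
		/* Coarse lock still held: take the per-area lock ourselves. */
		if (mm_wr_locked)
			pthread_mutex_lock(&areas[i].lock);
		unlink_area(&areas[i]);
		if (mm_wr_locked)
			pthread_mutex_unlock(&areas[i].lock);
	}
}

int main(void)
{
	struct area areas[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
	};

	free_areas(areas, 2, true);	/* exit_mmap()-style caller */
	free_areas(areas, 2, false);	/* munmap()-style caller, areas already isolated */
	return 0;
}

In the real patch the per-VMA "lock" taken is vma_start_write(), which is not released inside free_pgtables(); it stays in effect until the mmap_lock write section ends, which is why the kernel code has no matching unlock.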
mm/internal.h
@@ -105,7 +105,7 @@ void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
 		   struct vm_area_struct *start_vma, unsigned long floor,
-		   unsigned long ceiling);
+		   unsigned long ceiling, bool mm_wr_locked);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
mm/memory.c
@@ -362,7 +362,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
 		   struct vm_area_struct *vma, unsigned long floor,
-		   unsigned long ceiling)
+		   unsigned long ceiling, bool mm_wr_locked)
 {
 	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
@@ -380,6 +380,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
 		 * Hide vma from rmap and truncate_pagecache before freeing
 		 * pgtables
 		 */
+		if (mm_wr_locked)
+			vma_start_write(vma);
 		unlink_anon_vmas(vma);
 		unlink_file_vma(vma);
 
@@ -394,6 +396,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
 			       && !is_vm_hugetlb_page(next)) {
 				vma = next;
 				next = mas_find(&mas, ceiling - 1);
+				if (mm_wr_locked)
+					vma_start_write(vma);
 				unlink_anon_vmas(vma);
 				unlink_file_vma(vma);
 			}
mm/mmap.c
@@ -2167,7 +2167,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
 	free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-		      next ? next->vm_start : USER_PGTABLES_CEILING);
+		      next ? next->vm_start : USER_PGTABLES_CEILING,
+		      mm_wr_locked);
 	tlb_finish_mmu(&tlb);
 }
 
@@ -3064,7 +3065,7 @@ void exit_mmap(struct mm_struct *mm)
 	set_bit(MMF_OOM_SKIP, &mm->flags);
 	mmap_write_lock(mm);
 	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
-		      USER_PGTABLES_CEILING);
+		      USER_PGTABLES_CEILING, true);
 	tlb_finish_mmu(&tlb);
 	/*