Commit 6b73cff2 authored by Liam R. Howlett, committed by Andrew Morton

mm: change munmap splitting order and move_vma()

Splitting can be more efficient when the order is not a concern.  Change
do_vmi_align_munmap() to reduce walking of the tree during split
operations.
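
For reference, here is the resulting loop, condensed from the diff below with
the surrounding setup and error paths abbreviated.  Splitting with
new_below == 0 trims 'next' down to the region being unmapped, so it falls
through to the same munmap_sidetree() call as any other VMA instead of
requiring a vma_prev() walk, a separate sidetree insertion, and the
vma == next fixup:

	for_each_vma_range(*vmi, next, end) {
		/* Does it split the end? */
		if (next->vm_end > end) {
			/*
			 * new_below == 0: the new VMA keeps [end, vm_end) and
			 * 'next' is trimmed to the part being unmapped, so the
			 * iterator does not have to walk back to find it.
			 */
			error = __split_vma(vmi, next, end, 0);
			if (error)
				goto end_split_failed;
		}
		/* 'next' (possibly just trimmed) is detached like any other VMA. */
		error = munmap_sidetree(next, &mas_detach);
		if (error)
			goto munmap_sidetree_failed;
		...
	}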

move_vma() must also be altered to remove its dependency on keeping the
original VMA as the active part of the split.  Transition to using the vma
iterator to look up the prev and/or next VMA after the munmap.
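
Concretely, after do_vmi_munmap() the vma iterator is left positioned at the
unmapped range, so any piece remaining on either side can be reached with
vma_prev()/vma_next() when restoring VM_ACCOUNT, rather than relying on 'vma'
still being the surviving half of the split.  Condensed from the diff below,
with the intervening code abbreviated:

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		...
	}
	...
	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);	/* piece left below old_addr */
		vma->vm_flags |= VM_ACCOUNT;
	}

	if (account_end) {
		vma = vma_next(&vmi);	/* piece left above old_addr + old_len */
		vma->vm_flags |= VM_ACCOUNT;
	}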

[Liam.Howlett@oracle.com: fix vma iterator initialization]
  Link: https://lkml.kernel.org/r/20230126212011.980350-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20230120162650.984577-39-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cc8d1b09
@@ -2329,21 +2329,9 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	for_each_vma_range(*vmi, next, end) {
 		/* Does it split the end? */
 		if (next->vm_end > end) {
-			struct vm_area_struct *split;
-
-			error = __split_vma(vmi, next, end, 1);
+			error = __split_vma(vmi, next, end, 0);
 			if (error)
 				goto end_split_failed;
-
-			split = vma_prev(vmi);
-			error = munmap_sidetree(split, &mas_detach);
-			if (error)
-				goto munmap_sidetree_failed;
-
-			count++;
-			if (vma == next)
-				vma = split;
-			break;
 		}
 		error = munmap_sidetree(next, &mas_detach);
 		if (error)
@@ -2356,9 +2344,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 #endif
 	}
 
-	if (!next)
-		next = vma_next(vmi);
-
+	next = vma_next(vmi);
 	if (unlikely(uf)) {
 		/*
 		 * If userfaultfd_unmap_prep returns an error the vmas
@@ -580,11 +580,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	unsigned long vm_flags = vma->vm_flags;
 	unsigned long new_pgoff;
 	unsigned long moved_len;
-	unsigned long excess = 0;
+	unsigned long account_start = 0;
+	unsigned long account_end = 0;
 	unsigned long hiwater_vm;
-	int split = 0;
 	int err = 0;
 	bool need_rmap_locks;
+	struct vma_iterator vmi;
 
 	/*
 	 * We'd prefer to avoid failure later on in do_munmap:
@@ -662,10 +663,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
 		vma->vm_flags &= ~VM_ACCOUNT;
-		excess = vma->vm_end - vma->vm_start - old_len;
-		if (old_addr > vma->vm_start &&
-		    old_addr + old_len < vma->vm_end)
-			split = 1;
+		if (vma->vm_start < old_addr)
+			account_start = vma->vm_start;
+		if (vma->vm_end > old_addr + old_len)
+			account_end = vma->vm_end;
 	}
 
 	/*
@@ -700,11 +701,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		return new_addr;
 	}
 
-	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
+	vma_iter_init(&vmi, mm, old_addr);
+	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
 		/* OOM: unable to split vma, just get accounts right */
 		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
 			vm_acct_memory(old_len >> PAGE_SHIFT);
-		excess = 0;
+		account_start = account_end = 0;
 	}
 
 	if (vm_flags & VM_LOCKED) {
@@ -715,10 +717,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	mm->hiwater_vm = hiwater_vm;
 
 	/* Restore VM_ACCOUNT if one or two pieces of vma left */
-	if (excess) {
+	if (account_start) {
+		vma = vma_prev(&vmi);
+		vma->vm_flags |= VM_ACCOUNT;
+	}
+
+	if (account_end) {
+		vma = vma_next(&vmi);
 		vma->vm_flags |= VM_ACCOUNT;
-		if (split)
-			find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
 	}
 
 	return new_addr;