Commit 96d99023 authored by Liam Howlett, committed by Linus Torvalds

mm/mmap: introduce unlock_range() for code cleanup

Both __do_munmap() and exit_mmap() unlock a range of VMAs using almost
identical code blocks.  Replace both blocks by a static inline function.

[akpm@linux-foundation.org: tweak code layout]

Link: https://lkml.kernel.org/r/20210510211021.2797427-1-Liam.Howlett@Oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 78d9cf60
@@ -2802,6 +2802,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2885,17 +2901,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
 	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);
 
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3180,14 +3187,8 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);
 
 	arch_exit_mmap(mm);
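
For readers who want to poke at the pattern outside the kernel, here is a minimal userspace sketch of the same refactor. It is not kernel code: struct vma, struct mm, pages_in(), the VM_LOCKED value and the demo main() are simplified stand-ins invented for this example; only the shape of unlock_range() and its two call sites (a bounded limit for the munmap-style caller, ULONG_MAX for the exit-style caller) mirror the patch above.

/*
 * Minimal userspace sketch (an illustration, not kernel code).  The types,
 * pages_in(), the VM_LOCKED value and main() are simplified stand-ins made
 * up for this example; only the shape of unlock_range() and its two call
 * sites mirrors the patch.
 */
#include <stdio.h>
#include <limits.h>

#define VM_LOCKED	0x1UL

struct mm;

struct vma {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
	struct vma *vm_next;
	struct mm *vm_mm;
};

struct mm {
	struct vma *mmap;		/* head of the sorted VMA list */
	unsigned long locked_vm;	/* pages accounted as mlock()ed */
};

static unsigned long pages_in(const struct vma *v)
{
	return (v->vm_end - v->vm_start) / 4096;	/* assume 4 KiB pages */
}

/*
 * Same shape as the kernel helper: walk the list from 'start' and undo the
 * mlock accounting for every VMA that begins below 'limit'.  Clearing the
 * flag stands in for munlock_vma_pages_all().
 */
static void unlock_range(struct vma *start, unsigned long limit)
{
	struct mm *mm = start->vm_mm;
	struct vma *tmp = start;

	while (tmp && tmp->vm_start < limit) {
		if (tmp->vm_flags & VM_LOCKED) {
			mm->locked_vm -= pages_in(tmp);
			tmp->vm_flags &= ~VM_LOCKED;
		}
		tmp = tmp->vm_next;
	}
}

int main(void)
{
	struct mm mm = { 0 };
	struct vma b = { 0x9000, 0xb000, VM_LOCKED, NULL, &mm };
	struct vma a = { 0x1000, 0x5000, VM_LOCKED, &b, &mm };

	mm.mmap = &a;
	mm.locked_vm = pages_in(&a) + pages_in(&b);	/* 4 + 2 pages */

	/* __do_munmap()-style caller: only the VMAs starting below 'end'. */
	unlock_range(mm.mmap, 0x6000);
	printf("after partial unlock: locked_vm = %lu\n", mm.locked_vm);

	/* exit_mmap()-style caller: the whole address space. */
	unlock_range(mm.mmap, ULONG_MAX);
	printf("after full unlock:    locked_vm = %lu\n", mm.locked_vm);
	return 0;
}

Built with any C99 compiler, this prints locked_vm dropping from 6 to 2 after the bounded call and to 0 after the ULONG_MAX call, the same accounting the shared helper now performs for both __do_munmap() and exit_mmap().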