Commit cf51e86d authored by Liam R. Howlett, committed by Andrew Morton

mm/mmap: don't use __vma_adjust() in shift_arg_pages()

Introduce vma_shrink(), which uses the vma_prepare() and vma_complete()
functions to reduce a VMA's coverage.

Convert shift_arg_pages() to use vma_expand() and the new vma_shrink()
function.  Remove support for shrinking a VMA from __vma_adjust(), since
shift_arg_pages() was the only user that shrank a VMA in this way.

Link: https://lkml.kernel.org/r/20230120162650.984577-46-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7c9813e8
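
For orientation, a condensed sketch of the sequence shift_arg_pages() ends
up with after this patch: expand the VMA downwards to cover
[new_start, old_end), relocate the page tables, then shrink back to
[new_start, new_end).  The function name shift_arg_pages_sketch and the
elisions are mine; see the fs/exec.c hunks below for the actual change.

	/*
	 * Condensed sketch of the shift_arg_pages() flow after this patch.
	 * Locking, TLB teardown and most error handling are elided.
	 */
	static int shift_arg_pages_sketch(struct vm_area_struct *vma,
					  unsigned long shift)
	{
		struct mm_struct *mm = vma->vm_mm;
		unsigned long old_start = vma->vm_start;
		unsigned long old_end = vma->vm_end;
		unsigned long length = old_end - old_start;
		unsigned long new_start = old_start - shift;
		unsigned long new_end = old_end - shift;
		VMA_ITERATOR(vmi, mm, new_start);

		/* Grow the VMA so it covers both the old and the new range. */
		if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
			return -ENOMEM;

		/* Move the page tables down into the newly covered area. */
		if (length != move_page_tables(vma, old_start, vma, new_start,
					       length, false))
			return -ENOMEM;

		/* ... free the now-unused page tables at the old tail ... */

		/* Trim the VMA back so it covers only the new range. */
		vma_prev(&vmi);
		return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
	}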
fs/exec.c

@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff))
+	if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
 		return -ENOMEM;
 
 	/*
@@ -733,7 +733,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
 	vma_prev(&vmi);
 	/* Shrink the vma to just the new range */
-	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff);
+	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
 }
 
 /*
include/linux/mm.h

@@ -2831,17 +2831,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
-static inline int vma_adjust(struct vma_iterator *vmi,
-	struct vm_area_struct *vma, unsigned long start, unsigned long end,
-	pgoff_t pgoff)
-{
-	return __vma_adjust(vmi, vma, start, end, pgoff, NULL);
-}
 extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		      unsigned long start, unsigned long end, pgoff_t pgoff,
 		      struct vm_area_struct *next);
+extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+		       unsigned long start, unsigned long end, pgoff_t pgoff);
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
 	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
 	unsigned long end, unsigned long vm_flags, struct anon_vma *,
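
After this hunk the header surface is symmetric: vma_expand() grows a VMA's
range and vma_shrink() trims it, while the __vma_adjust() declaration and
its vma_adjust() wrapper disappear from the header entirely.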
mm/mmap.c

@@ -682,6 +682,44 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 nomem:
 	return -ENOMEM;
 }
 
+/*
+ * vma_shrink() - Reduce an existing VMAs memory area
+ * @vmi: The vma iterator
+ * @vma: The VMA to modify
+ * @start: The new start
+ * @end: The new end
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+	       unsigned long start, unsigned long end, pgoff_t pgoff)
+{
+	struct vma_prepare vp;
+
+	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
+
+	if (vma_iter_prealloc(vmi))
+		return -ENOMEM;
+
+	init_vma_prep(&vp, vma);
+	vma_adjust_trans_huge(vma, start, end, 0);
+	vma_prepare(&vp);
+
+	if (vma->vm_start < start)
+		vma_iter_clear(vmi, vma->vm_start, start);
+
+	if (vma->vm_end > end)
+		vma_iter_clear(vmi, end, vma->vm_end);
+
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
+	vma_complete(&vp, vmi, vma->vm_mm);
+	validate_mm(vma->vm_mm);
+	return 0;
+}
+
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
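
vma_shrink() expects the caller to hold the mmap write lock and to have
positioned @vmi on @vma; it preallocates maple-tree nodes up front so the
later tree writes cannot fail.  A minimal hypothetical caller, to show the
shape of the API (trim_vma_tail() is my name, not part of this patch):

	/*
	 * Hypothetical helper, not from this patch: give back the tail of
	 * @vma so that it ends at @new_end.  Assumes the mmap write lock is
	 * held and @vmi is positioned on @vma.
	 */
	static int trim_vma_tail(struct vma_iterator *vmi,
				 struct vm_area_struct *vma,
				 unsigned long new_end)
	{
		/* vm_pgoff is unchanged because vm_start does not move. */
		return vma_shrink(vmi, vma, vma->vm_start, new_end,
				  vma->vm_pgoff);
	}

Note that the WARN_ON in vma_shrink() fires only when both ends move at
once: a shrink may raise the start or lower the end, but not both.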
@@ -797,14 +835,7 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
 	vma_prepare(&vma_prep);
 
-	if (vma->vm_start < start)
-		vma_iter_clear(vmi, vma->vm_start, start);
-	else if (start != vma->vm_start)
-		vma_changed = true;
-
-	if (vma->vm_end > end)
-		vma_iter_clear(vmi, end, vma->vm_end);
-	else if (end != vma->vm_end)
+	if (start < vma->vm_start || end > vma->vm_end)
 		vma_changed = true;
 
 	vma->vm_start = start;
@@ -817,8 +848,11 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
-		vma_iter_store(vmi, next);
+		if (adjust_next < 0) {
+			WARN_ON_ONCE(vma_changed);
+			vma_iter_store(vmi, next);
+		}
 	}
 
 	vma_complete(&vma_prep, vmi, mm);
 	vma_iter_free(vmi);
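
With shrinking handled by vma_shrink(), a VMA reaching this point in
__vma_adjust() can only keep or grow its range, so the old vma_iter_clear()
calls go away and any boundary change simply sets vma_changed for a single
tree store later.  The new WARN_ON_ONCE(vma_changed) records the remaining
invariant as I read this hunk: when next expands downwards (adjust_next < 0),
the maple-tree store of next also rewrites the slots vma gave up, so vma
itself must not have a tree update of its own pending.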