Commit a5271c10 authored by Hugh Dickins's avatar Hugh Dickins Committed by Linus Torvalds

[PATCH] remove acct arg from do_munmap

An acct flag was added to do_munmap, true everywhere but in mremap's
move_vma: instead of updating the arch and driver sources, revert
that change and temporarily mask VM_ACCOUNT around that one do_munmap.

Also, noticed that do_mremap fails needlessly if both shrinking _and_
moving a mapping: update old_len to pass vm area boundaries test.
parent d9acf5fe
...@@ -430,7 +430,7 @@ static inline unsigned long do_mmap(struct file *file, unsigned long addr, ...@@ -430,7 +430,7 @@ static inline unsigned long do_mmap(struct file *file, unsigned long addr,
return ret; return ret;
} }
extern int do_munmap(struct mm_struct *, unsigned long, size_t, int); extern int do_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long do_brk(unsigned long, unsigned long); extern unsigned long do_brk(unsigned long, unsigned long);
......
...@@ -671,7 +671,7 @@ asmlinkage long sys_shmdt (char *shmaddr) ...@@ -671,7 +671,7 @@ asmlinkage long sys_shmdt (char *shmaddr)
shmdnext = shmd->vm_next; shmdnext = shmd->vm_next;
if (shmd->vm_ops == &shm_vm_ops if (shmd->vm_ops == &shm_vm_ops
&& shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) { && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) {
do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start, 1); do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start);
retval = 0; retval = 0;
} }
} }
......
...@@ -197,7 +197,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk) ...@@ -197,7 +197,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
/* Always allow shrinking brk. */ /* Always allow shrinking brk. */
if (brk <= mm->brk) { if (brk <= mm->brk) {
if (!do_munmap(mm, newbrk, oldbrk-newbrk, 1)) if (!do_munmap(mm, newbrk, oldbrk-newbrk))
goto set_brk; goto set_brk;
goto out; goto out;
} }
...@@ -517,7 +517,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, ...@@ -517,7 +517,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
munmap_back: munmap_back:
vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
if (vma && vma->vm_start < addr + len) { if (vma && vma->vm_start < addr + len) {
if (do_munmap(mm, addr, len, 1)) if (do_munmap(mm, addr, len))
return -ENOMEM; return -ENOMEM;
goto munmap_back; goto munmap_back;
} }
...@@ -945,8 +945,7 @@ static void unmap_region(struct mm_struct *mm, ...@@ -945,8 +945,7 @@ static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *mpnt, struct vm_area_struct *mpnt,
struct vm_area_struct *prev, struct vm_area_struct *prev,
unsigned long start, unsigned long start,
unsigned long end, unsigned long end)
int acct)
{ {
mmu_gather_t *tlb; mmu_gather_t *tlb;
...@@ -960,7 +959,7 @@ static void unmap_region(struct mm_struct *mm, ...@@ -960,7 +959,7 @@ static void unmap_region(struct mm_struct *mm,
unmap_page_range(tlb, mpnt, from, to); unmap_page_range(tlb, mpnt, from, to);
if (acct && (mpnt->vm_flags & VM_ACCOUNT)) { if (mpnt->vm_flags & VM_ACCOUNT) {
len = to - from; len = to - from;
vm_unacct_memory(len >> PAGE_SHIFT); vm_unacct_memory(len >> PAGE_SHIFT);
} }
...@@ -1041,7 +1040,7 @@ static int splitvma(struct mm_struct *mm, struct vm_area_struct *mpnt, unsigned ...@@ -1041,7 +1040,7 @@ static int splitvma(struct mm_struct *mm, struct vm_area_struct *mpnt, unsigned
* work. This now handles partial unmappings. * work. This now handles partial unmappings.
* Jeremy Fitzhardine <jeremy@sw.oz.au> * Jeremy Fitzhardine <jeremy@sw.oz.au>
*/ */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, int acct) int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{ {
unsigned long end; unsigned long end;
struct vm_area_struct *mpnt, *prev, *last; struct vm_area_struct *mpnt, *prev, *last;
...@@ -1085,7 +1084,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, int acct) ...@@ -1085,7 +1084,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, int acct)
*/ */
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
mpnt = touched_by_munmap(mm, mpnt, prev, end); mpnt = touched_by_munmap(mm, mpnt, prev, end);
unmap_region(mm, mpnt, prev, start, end, acct); unmap_region(mm, mpnt, prev, start, end);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
/* Fix up all other VM information */ /* Fix up all other VM information */
...@@ -1100,7 +1099,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len) ...@@ -1100,7 +1099,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
ret = do_munmap(mm, addr, len, 1); ret = do_munmap(mm, addr, len);
up_write(&mm->mmap_sem); up_write(&mm->mmap_sem);
return ret; return ret;
} }
...@@ -1137,7 +1136,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) ...@@ -1137,7 +1136,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
munmap_back: munmap_back:
vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
if (vma && vma->vm_start < addr + len) { if (vma && vma->vm_start < addr + len) {
if (do_munmap(mm, addr, len, 1)) if (do_munmap(mm, addr, len))
return -ENOMEM; return -ENOMEM;
goto munmap_back; goto munmap_back;
} }
......
...@@ -150,6 +150,7 @@ static inline unsigned long move_vma(struct vm_area_struct * vma, ...@@ -150,6 +150,7 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
struct mm_struct * mm = vma->vm_mm; struct mm_struct * mm = vma->vm_mm;
struct vm_area_struct * new_vma, * next, * prev; struct vm_area_struct * new_vma, * next, * prev;
int allocated_vma; int allocated_vma;
int split = 0;
new_vma = NULL; new_vma = NULL;
next = find_vma_prev(mm, new_addr, &prev); next = find_vma_prev(mm, new_addr, &prev);
...@@ -210,11 +211,26 @@ static inline unsigned long move_vma(struct vm_area_struct * vma, ...@@ -210,11 +211,26 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
new_vma->vm_ops->open(new_vma); new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma); insert_vm_struct(current->mm, new_vma);
} }
/*
* The old VMA has been accounted for, /* Conceal VM_ACCOUNT so old reservation is not undone */
* don't double account if (vma->vm_flags & VM_ACCOUNT) {
*/ vma->vm_flags &= ~VM_ACCOUNT;
do_munmap(current->mm, addr, old_len, 0); if (addr > vma->vm_start) {
if (addr + old_len < vma->vm_end)
split = 1;
} else if (addr + old_len == vma->vm_end)
vma = NULL; /* it will be removed */
} else
vma = NULL; /* nothing more to do */
do_munmap(current->mm, addr, old_len);
/* Restore VM_ACCOUNT if one or two pieces of vma left */
if (vma) {
vma->vm_flags |= VM_ACCOUNT;
if (split)
vma->vm_next->vm_flags |= VM_ACCOUNT;
}
current->mm->total_vm += new_len >> PAGE_SHIFT; current->mm->total_vm += new_len >> PAGE_SHIFT;
if (new_vma->vm_flags & VM_LOCKED) { if (new_vma->vm_flags & VM_LOCKED) {
current->mm->locked_vm += new_len >> PAGE_SHIFT; current->mm->locked_vm += new_len >> PAGE_SHIFT;
...@@ -272,7 +288,7 @@ unsigned long do_mremap(unsigned long addr, ...@@ -272,7 +288,7 @@ unsigned long do_mremap(unsigned long addr,
if ((addr <= new_addr) && (addr+old_len) > new_addr) if ((addr <= new_addr) && (addr+old_len) > new_addr)
goto out; goto out;
do_munmap(current->mm, new_addr, new_len, 1); do_munmap(current->mm, new_addr, new_len);
} }
/* /*
...@@ -282,9 +298,10 @@ unsigned long do_mremap(unsigned long addr, ...@@ -282,9 +298,10 @@ unsigned long do_mremap(unsigned long addr,
*/ */
ret = addr; ret = addr;
if (old_len >= new_len) { if (old_len >= new_len) {
do_munmap(current->mm, addr+new_len, old_len - new_len, 1); do_munmap(current->mm, addr+new_len, old_len - new_len);
if (!(flags & MREMAP_FIXED) || (new_addr == addr)) if (!(flags & MREMAP_FIXED) || (new_addr == addr))
goto out; goto out;
old_len = new_len;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment