Commit e054680b authored by Hugh Dickins's avatar Hugh Dickins Committed by Linus Torvalds

[PATCH] fix shared and private accounting

do_mmap_pgoff's (file == NULL) check was incorrect: it caused shared
MAP_ANONYMOUS objects to be counted twice (again in shmem_file_setup),
and again on fork(); whereas the equivalent shared /dev/zero objects
were correctly counted.  Conversely, a private readonly file mapping
was (correctly) not counted, but still not counted when mprotected to
writable: mprotect_fixup had pointless "charged = 0" changes, now it
does vm_enough_memory checking when private is first made writable
(but later we may want to refine behaviour on a noreserve mapping).

Also changed correct (flags & MAP_SHARED) test in do_mmap_pgoff to
equivalent (vm_flags & VM_SHARED) test: because do_mmap_pgoff is
dealing with vm_flags rather than the input flags by that stage.
parent 3f7583d3
@@ -527,16 +527,14 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 			> current->rlim[RLIMIT_AS].rlim_cur)
 		return -ENOMEM;
 
-	if (sysctl_overcommit_memory > 1)
-		flags &= ~MAP_NORESERVE;
-
-	/* Private writable mapping? Check memory availability.. */
-	if ((((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE) ||
-	    (file == NULL)) && !(flags & MAP_NORESERVE)) {
-		charged = len >> PAGE_SHIFT;
-		if (!vm_enough_memory(charged))
-			return -ENOMEM;
-		vm_flags |= VM_ACCOUNT;
+	if (!(flags & MAP_NORESERVE) || sysctl_overcommit_memory > 1) {
+		if ((vm_flags & (VM_SHARED|VM_WRITE)) == VM_WRITE) {
+			/* Private writable mapping: check memory availability */
+			charged = len >> PAGE_SHIFT;
+			if (!vm_enough_memory(charged))
+				return -ENOMEM;
+			vm_flags |= VM_ACCOUNT;
+		}
 	}
 
 	/* Can we just expand an old anonymous mapping? */
@@ -579,7 +577,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 		error = file->f_op->mmap(file, vma);
 		if (error)
 			goto unmap_and_free_vma;
-	} else if (flags & MAP_SHARED) {
+	} else if (vm_flags & VM_SHARED) {
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
......
@@ -257,6 +257,22 @@ static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct **
 		*pprev = vma;
 		return 0;
 	}
+
+	/*
+	 * If we make a private mapping writable we increase our commit;
+	 * but (without finer accounting) cannot reduce our commit if we
+	 * make it unwritable again.
+	 *
+	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
+	 * a MAP_NORESERVE private mapping to writable will now reserve.
+	 */
+	if ((newflags & VM_WRITE) &&
+	    !(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
+		charged = (end - start) >> PAGE_SHIFT;
+		if (!vm_enough_memory(charged))
+			return -ENOMEM;
+		newflags |= VM_ACCOUNT;
+	}
+
 	newprot = protection_map[newflags & 0xf];
 	if (start == vma->vm_start) {
 		if (end == vma->vm_end)
@@ -267,19 +283,10 @@ static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct **
 		error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
 	else
 		error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);
 	if (error) {
-		if (newflags & PROT_WRITE)
-			vm_unacct_memory(charged);
+		vm_unacct_memory(charged);
 		return error;
 	}
-
-	/*
-	 * Delayed accounting for reduction of memory use - done last to
-	 * avoid allocation races
-	 */
-	if (charged && !(newflags & PROT_WRITE))
-		vm_unacct_memory(charged);
-
 	change_protection(vma, start, end, newprot);
 	return 0;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment