Commit f67d9b15 authored by Bob Liu, committed by Linus Torvalds

nommu: add page alignment to mmap

Currently, on no-MMU architectures mmap(), mremap() and munmap() do not
page-align their lengths, which is inconsistent with MMU architectures
and causes some issues.

First, some drivers' mmap() implementations depend on
vma->vm_end - vma->vm_start being page aligned, which is true with an
MMU but not without one; the UVC camera driver is one example.
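
To illustrate, a hedged sketch of a driver mmap() handler on a no-MMU
target (foo_mmap() and foo_map_pages() are hypothetical names, not the
actual UVC code); it only behaves as intended when the VMA length is a
whole number of pages:

	/* hypothetical no-MMU driver mmap() handler */
	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/* the shift silently drops any sub-page remainder, so a
		 * non-page-aligned VMA maps fewer bytes than requested */
		unsigned long nr_pages = size >> PAGE_SHIFT;

		return foo_map_pages(vma, nr_pages); /* hypothetical helper */
	}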

Second, munmap() may return -EINVAL (via the "[split file]" error path)
when the end address passed in from userspace is not page aligned but
vma->vm_end is, due to a VMA split or the driver's mmap() operation.
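
The second failure is visible from userspace; a minimal sketch,
assuming a no-MMU kernel that rounds the region itself up to a full
page:

	#include <stdlib.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* ask for 100 bytes; the kernel-side region still ends
		 * on a page boundary */
		void *p = mmap(NULL, 100, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return EXIT_FAILURE;

		/* p + 100 is not page aligned but vma->vm_end is; before
		 * this patch that mismatch could make munmap() fail with
		 * -EINVAL on no-MMU kernels */
		return munmap(p, 100) ? EXIT_FAILURE : EXIT_SUCCESS;
	}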

Add page alignment to fix those issues.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Greg Ungerer <gerg@snapgear.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent eb709b0d
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1124,7 +1124,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 			   unsigned long capabilities)
 {
 	struct page *pages;
-	unsigned long total, point, n, rlen;
+	unsigned long total, point, n;
 	void *base;
 	int ret, order;
@@ -1148,13 +1148,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	 * make a private copy of the data and map that instead */
 	}
 
-	rlen = PAGE_ALIGN(len);
-
 	/* allocate some memory to hold the mapping
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	order = get_order(rlen);
+	order = get_order(len);
 	kdebug("alloc order %d for %lx", order, len);
 	pages = alloc_pages(GFP_KERNEL, order);
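
Dropping rlen is safe for the allocation order even on its own, since
get_order() already rounds its argument up to a whole number of pages;
the later uses of len (point, region->vm_end, the read and memset)
additionally rely on do_mmap_pgoff() now aligning len before
do_mmap_private() is ever called (see the do_mmap_pgoff hunk below).
A runnable sketch of the alignment arithmetic, assuming 4 KiB pages
(an illustrative re-implementation, not the kernel macros verbatim):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(len)	(((len) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		printf("%lu\n", PAGE_ALIGN(100));  /* 4096: rounded up */
		printf("%lu\n", PAGE_ALIGN(8192)); /* 8192: already aligned */
		return 0;
	}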
@@ -1164,7 +1163,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	total = 1 << order;
 	atomic_long_add(total, &mmap_pages_allocated);
 
-	point = rlen >> PAGE_SHIFT;
+	point = len >> PAGE_SHIFT;
 
 	/* we allocated a power-of-2 sized page set, so we may want to trim off
 	 * the excess */
@@ -1186,7 +1185,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	base = page_address(pages);
 	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
 	region->vm_start = (unsigned long) base;
-	region->vm_end   = region->vm_start + rlen;
+	region->vm_end   = region->vm_start + len;
 	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 
 	vma->vm_start = region->vm_start;
@@ -1202,15 +1201,15 @@ static int do_mmap_private(struct vm_area_struct *vma,
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
-		ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
+		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
 		set_fs(old_fs);
 
 		if (ret < 0)
 			goto error_free;
 
 		/* clear the last little bit */
-		if (ret < rlen)
-			memset(base + ret, 0, rlen - ret);
+		if (ret < len)
+			memset(base + ret, 0, len - ret);
 
 	}
@@ -1259,6 +1258,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 
 	/* we ignore the address hint */
 	addr = 0;
+	len = PAGE_ALIGN(len);
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
@@ -1635,14 +1635,17 @@ static int shrink_vma(struct mm_struct *mm,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
 	struct vm_area_struct *vma;
-	unsigned long end = start + len;
+	unsigned long end;
 	int ret;
 
 	kenter(",%lx,%zx", start, len);
 
+	len = PAGE_ALIGN(len);
 	if (len == 0)
 		return -EINVAL;
 
+	end = start + len;
+
 	/* find the first potentially overlapping VMA */
 	vma = find_vma(mm, start);
 	if (!vma) {
@@ -1762,6 +1765,8 @@ unsigned long do_mremap(unsigned long addr,
 	struct vm_area_struct *vma;
 
 	/* insanity checks first */
+	old_len = PAGE_ALIGN(old_len);
+	new_len = PAGE_ALIGN(new_len);
 	if (old_len == 0 || new_len == 0)
 		return (unsigned long) -EINVAL;