Commit b6661861 authored by Michel Lespinasse, committed by Linus Torvalds

mm: use vm_unmapped_area() on mips architecture

Update the mips arch_get_unmapped_area[_topdown] functions to make use of
vm_unmapped_area() instead of implementing a brute force search.

[akpm@linux-foundation.org: remove now-unused COLOUR_ALIGN_DOWN()]
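
With vm_unmapped_area(), the arch code only describes the search (length, limits,
alignment mask and offset, direction) in a struct vm_unmapped_area_info and lets the
core mm code find a suitable gap. A condensed sketch of the allocation path this patch
introduces, pulled from the diff below with explanatory comments added (not a standalone
program; it runs in the context of arch_get_unmapped_area_common()):

	struct vm_unmapped_area_info info;

	info.length = len;
	/* honour D-cache aliasing colour when the mapping requires it */
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		/* top-down: look for a gap below mmap_base first */
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);
		if (!(addr & ~PAGE_MASK))
			return addr;
		/* top-down search failed; fall back to bottom-up below */
	}

	/* bottom-up: search from mmap_base up to TASK_SIZE */
	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);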
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cdc17344
@@ -45,18 +45,6 @@ static unsigned long mmap_base(unsigned long rnd)
 	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -71,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	struct vm_area_struct *vma;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
@@ -107,97 +96,31 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 			return addr;
 	}
 
-	if (dir == UP) {
-		addr = mm->mmap_base;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-		else
-			addr = PAGE_ALIGN(addr);
-
-		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-			/* At this point:  (!vma || addr < vma->vm_end). */
-			if (TASK_SIZE - len < addr)
-				return -ENOMEM;
-			if (!vma || addr + len <= vma->vm_start)
-				return addr;
-			addr = vma->vm_end;
-			if (do_color_align)
-				addr = COLOUR_ALIGN(addr, pgoff);
-		}
-	} else {
-		/* check if free_area_cache is useful for us */
-		if (len <= mm->cached_hole_size) {
-			mm->cached_hole_size = 0;
-			mm->free_area_cache = mm->mmap_base;
-		}
-
-		/*
-		 * either no address requested, or the mapping can't fit into
-		 * the requested address hole
-		 */
-		addr = mm->free_area_cache;
-		if (do_color_align) {
-			unsigned long base =
-				COLOUR_ALIGN_DOWN(addr - len, pgoff);
-			addr = base + len;
-		}
-
-		/* make sure it can fit in the remaining address space */
-		if (likely(addr > len)) {
-			vma = find_vma(mm, addr - len);
-			if (!vma || addr <= vma->vm_start) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr - len;
-			}
-		}
-
-		if (unlikely(mm->mmap_base < len))
-			goto bottomup;
-
-		addr = mm->mmap_base - len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-		do {
-			/*
-			 * Lookup failure means no vma is above this address,
-			 * else if new region fits below vma->vm_start,
-			 * return with success:
-			 */
-			vma = find_vma(mm, addr);
-			if (likely(!vma || addr + len <= vma->vm_start)) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr;
-			}
-
-			/* remember the largest hole we saw so far */
-			if (addr + mm->cached_hole_size < vma->vm_start)
-				mm->cached_hole_size = vma->vm_start - addr;
-
-			/* try just below the current vma->vm_start */
-			addr = vma->vm_start - len;
-			if (do_color_align)
-				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-		} while (likely(len < vma->vm_start));
-
-bottomup:
-		/*
-		 * A failed mmap() very likely causes application failure,
-		 * so fall back to the bottom-up function here. This scenario
-		 * can happen with large stack limits and large mmap()
-		 * allocations.
-		 */
-		mm->cached_hole_size = ~0UL;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
-		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-
-		/*
-		 * Restore the topdown base:
-		 */
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = ~0UL;
-
-		return addr;
-	}
+	info.length = len;
+	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+
+	if (dir == DOWN) {
+		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+		info.low_limit = PAGE_SIZE;
+		info.high_limit = mm->mmap_base;
+		addr = vm_unmapped_area(&info);
+
+		if (!(addr & ~PAGE_MASK))
+			return addr;
+
+		/*
+		 * A failed mmap() very likely causes application failure,
+		 * so fall back to the bottom-up function here. This scenario
+		 * can happen with large stack limits and large mmap()
+		 * allocations.
+		 */
+	}
+
+	info.flags = 0;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,