Commit f75c28d8 authored by James Hogan

metag: hugetlb: convert to vm_unmapped_area()

Convert hugetlb_get_unmapped_area_new_pmd() to use vm_unmapped_area()
rather than searching the virtual address space itself. This fixes the
following errors in linux-next due to the specified members being
removed after other architectures have already been converted:

arch/metag/mm/hugetlbpage.c: In function 'hugetlb_get_unmapped_area_new_pmd':
arch/metag/mm/hugetlbpage.c:199: error: 'struct mm_struct' has no member named 'cached_hole_size'
arch/metag/mm/hugetlbpage.c:200: error: 'struct mm_struct' has no member named 'free_area_cache'
arch/metag/mm/hugetlbpage.c:215: error: 'struct mm_struct' has no member named 'cached_hole_size'
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Michel Lespinasse <walken@google.com>
parent c838e72a
arch/metag/mm/hugetlbpage.c
@@ -192,43 +192,15 @@ hugetlb_get_unmapped_area_existing(unsigned long len)
 static unsigned long
 hugetlb_get_unmapped_area_new_pmd(unsigned long len)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr, addr;
-
-	if (ALIGN_HUGEPT(len) > mm->cached_hole_size)
-		start_addr = mm->free_area_cache;
-	else
-		start_addr = TASK_UNMAPPED_BASE;
-
-new_search:
-	addr = ALIGN_HUGEPT(start_addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto new_search;
-			}
-			return 0;
-		}
-		/* skip ahead if we've aligned right over some vmas */
-		if (vma && vma->vm_end <= addr)
-			continue;
-		if (!vma || ALIGN_HUGEPT(addr + len) <= vma->vm_start) {
-#if HPAGE_SHIFT < HUGEPT_SHIFT
-			if (len & HUGEPT_MASK)
-				mm->context.part_huge = addr + len;
-#endif
-			return addr;
-		}
-		addr = ALIGN_HUGEPT(vma->vm_end);
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & HUGEPT_MASK;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -266,11 +238,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
 	 * for huge pages.
 	 */
-	addr = hugetlb_get_unmapped_area_new_pmd(len);
-	if (likely(addr))
-		return addr;
-
-	return -EINVAL;
+	return hugetlb_get_unmapped_area_new_pmd(len);
 }
 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
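
For context, the conversion replaces the hand-rolled walk over VMAs with a single vm_unmapped_area() call, which performs the same kind of bottom-up, first-fit gap search described by the info fields. The following user-space sketch illustrates that search under made-up mappings and limits standing in for TASK_UNMAPPED_BASE, TASK_SIZE and the metag alignment mask; it is a conceptual illustration only, not the kernel's implementation (which walks an augmented VMA tree rather than an array).

/*
 * Illustrative user-space sketch of the bottom-up, first-fit gap search
 * that vm_unmapped_area() performs when info.flags == 0. NOT kernel code;
 * the struct fields merely mirror vm_unmapped_area_info, and all addresses
 * below are made-up example values.
 */
#include <stdio.h>

struct mapping {			/* an existing VMA: [start, end) */
	unsigned long start;
	unsigned long end;
};

struct unmapped_area_info {		/* mirrors struct vm_unmapped_area_info */
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

/* Bump addr up until (addr & align_mask) == (align_offset & align_mask). */
static unsigned long align_gap(unsigned long addr,
			       const struct unmapped_area_info *info)
{
	return addr + ((info->align_offset - addr) & info->align_mask);
}

/* First fit: lowest aligned address whose [addr, addr + length) overlaps no
 * existing mapping and stays below high_limit; returns 0 on failure (the
 * real helper returns -ENOMEM). maps[] must be sorted and non-overlapping. */
static unsigned long find_unmapped_area(const struct mapping *maps, int n,
					const struct unmapped_area_info *info)
{
	unsigned long addr = align_gap(info->low_limit, info);
	int i;

	for (i = 0; i <= n; i++) {
		/* Current gap ends at the next mapping, or at high_limit. */
		unsigned long gap_end = (i < n) ? maps[i].start
						: info->high_limit;

		if (addr + info->length <= gap_end &&
		    addr + info->length <= info->high_limit)
			return addr;

		if (i < n && maps[i].end > addr)
			addr = align_gap(maps[i].end, info);
	}
	return 0;
}

int main(void)
{
	struct mapping maps[] = {	/* hypothetical existing mappings */
		{ 0x40000000UL, 0x40800000UL },
		{ 0x41000000UL, 0x41004000UL },
	};
	struct unmapped_area_info info = {
		.length       = 0x400000UL,	/* one 4 MiB huge-PTE block */
		.low_limit    = 0x40000000UL,	/* stand-in TASK_UNMAPPED_BASE */
		.high_limit   = 0x80000000UL,	/* stand-in TASK_SIZE */
		.align_mask   = 0x400000UL - 1,	/* 4 MiB alignment */
		.align_offset = 0,
	};

	printf("found gap at 0x%lx\n", find_unmapped_area(maps, 2, &info));
	return 0;
}

Compiled and run as-is, the sketch prints the first suitably aligned 4 MiB gap for the sample layout, 0x40800000.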