Commit be77e999 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/radix: Use mm->task_size for boundary checking instead of addr_limit

We don't initialise addr_limit correctly for 32-bit applications, so default to
using mm->task_size for boundary condition checks and use addr_limit only to
control the free space search. This makes sure we do the right thing for 32-bit
applications.

We should consolidate the usage of TASK_SIZE/mm->task_size and
mm->context.addr_limit later.

This partially reverts commit fbfef902 (powerpc/mm: Switch some
TASK_SIZE checks to use mm_context addr_limit).

Fixes: fbfef902 ("powerpc/mm: Switch some TASK_SIZE checks to use mm_context addr_limit")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 8d1b48ef
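
For context, the distinction the patch draws (mm->task_size is the hard boundary for
validity checks, while mm->context.addr_limit only caps the free-space search) can be
modelled in plain C. The sketch below is illustrative only and is not kernel code:
struct mm_model and request_fits() are made-up stand-ins for mm_struct and the real
checks, and the example assumes a 64-bit host.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the mm_struct fields involved (not the kernel type). */
struct mm_model {
	unsigned long task_size;   /* hard upper bound of the process address space */
	unsigned long addr_limit;  /* mm->context.addr_limit: caps the search only  */
};

/*
 * Boundary check in the spirit of the patched code: validity is judged
 * against task_size, so a 32-bit task whose addr_limit was never set up
 * is still handled correctly.
 */
static bool request_fits(const struct mm_model *mm, unsigned long addr,
			 unsigned long len, unsigned long mmap_min_addr)
{
	if (len > mm->task_size - mmap_min_addr)
		return false;
	return addr >= mmap_min_addr && mm->task_size - len >= addr;
}

int main(void)
{
	/* 32-bit task: 4GB task size, addr_limit left uninitialised (the bug case). */
	struct mm_model mm32 = { .task_size = 0x100000000UL, .addr_limit = 0 };

	/* Judged against addr_limit this request would be rejected;
	 * judged against task_size it fits, so this prints "fits: 1". */
	printf("fits: %d\n", request_fits(&mm32, 0x10000000UL, 0x1000UL, 0x10000UL));
	return 0;
}
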
@@ -55,7 +55,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (len > mm->context.addr_limit)
+	if (len > mm->task_size)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
@@ -67,7 +67,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
-		if (mm->context.addr_limit - len >= addr &&
+		if (mm->task_size - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
...
@@ -100,7 +100,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
 		mm->context.addr_limit = TASK_SIZE;
 
-	if (len > mm->context.addr_limit - mmap_min_addr)
+	if (len > mm->task_size - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -109,7 +109,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
+		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
@@ -143,7 +143,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 		mm->context.addr_limit = TASK_SIZE;
 
 	/* requested length too big for entire address space */
-	if (len > mm->context.addr_limit - mmap_min_addr)
+	if (len > mm->task_size - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -153,7 +153,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
+		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
...
@@ -292,8 +292,8 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
-		if (addr >= mm->context.addr_limit)
-			addr = mm->context.addr_limit;
+		if (addr >= high_limit)
+			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
...
@@ -197,8 +197,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
 
 	/* Check parameters */
 	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
-	    addr >= mm->context.addr_limit || len >= mm->context.addr_limit ||
-	    addr + len > mm->context.addr_limit)
+	    addr >= mm->task_size || len >= mm->task_size ||
+	    addr + len > mm->task_size)
 		return -EINVAL;
 
 	if (is_hugepage_only_range(mm, addr, len))
...