Commit 6a72dc03 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s/hash: Fix 128TB-512TB virtual address boundary case allocation

When allocating VA space with a hint that crosses 128TB, the SLB
addr_limit variable is not expanded if addr is not > 128TB, but the
slice allocation looks at task_size, which is 512TB. This results in
slice_check_fit() incorrectly succeeding because the slice_count
truncates off bit 128 of the requested mask, so the comparison to the
available mask succeeds.

Fix this by using mm->context.addr_limit instead of mm->task_size for
testing allocation limits. This causes such allocations to fail.

Fixes: f4ea6dcb ("powerpc/mm: Enable mappings above 128TB")
Cc: stable@vger.kernel.org # v4.12+
Reported-by: Florian Weimer <fweimer@redhat.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7ece3709
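
For illustration only (not part of this commit): a minimal userspace sketch of the boundary case described above, assuming a 64-bit powerpc hash-MMU task where the 128TB point is 1UL << 47. The hint sits just below 128TB and the length crosses it, so the kernel does not expand the SLB addr_limit; before this fix the slice code could still place the mapping above 128TB, and with the fix such a request is rejected instead. The 1MB offset and 2MB length are arbitrary example values.

/* Illustrative sketch only; constants and values are assumptions, not from the commit. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long boundary = 1UL << 47;		/* 128TB */
	void *hint = (void *)(boundary - (1UL << 20));	/* hint just below 128TB */
	size_t len = 2UL << 20;				/* length crosses the boundary */
	void *p;

	/* addr itself is below 128TB, so the SLB addr_limit is not expanded,
	 * even though addr + len lies above it. */
	p = mmap(hint, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		perror("mmap");
	else
		printf("mapped at %p\n", p);
	return 0;
}
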
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 {
 	struct vm_area_struct *vma;
 
-	if ((mm->task_size - len) < addr)
+	if ((mm->context.addr_limit - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
 	return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
 		if (!slice_low_has_vma(mm, i))
 			ret->low_slices |= 1u << i;
 
-	if (mm->task_size <= SLICE_LOW_TOP)
+	if (mm->context.addr_limit <= SLICE_LOW_TOP)
 		return;
 
 	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	struct slice_mask compat_mask;
 	int fixed = (flags & MAP_FIXED);
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+	unsigned long page_size = 1UL << pshift;
 	struct mm_struct *mm = current->mm;
 	unsigned long newaddr;
 	unsigned long high_limit;
 
-	/*
-	 * Check if we need to expland slice area.
-	 */
-	if (unlikely(addr >= mm->context.addr_limit &&
-		     mm->context.addr_limit != TASK_SIZE)) {
-		mm->context.addr_limit = TASK_SIZE;
+	high_limit = DEFAULT_MAP_WINDOW;
+	if (addr >= high_limit)
+		high_limit = TASK_SIZE;
+
+	if (len > high_limit)
+		return -ENOMEM;
+	if (len & (page_size - 1))
+		return -EINVAL;
+	if (fixed) {
+		if (addr & (page_size - 1))
+			return -EINVAL;
+		if (addr > high_limit - len)
+			return -ENOMEM;
+	}
+
+	if (high_limit > mm->context.addr_limit) {
+		mm->context.addr_limit = high_limit;
 		on_each_cpu(slice_flush_segments, mm, 1);
 	}
-	/*
-	 * This mmap request can allocate upt to 512TB
-	 */
-	if (addr >= DEFAULT_MAP_WINDOW)
-		high_limit = mm->context.addr_limit;
-	else
-		high_limit = DEFAULT_MAP_WINDOW;
+
 	/*
 	 * init different masks
 	 */
@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* Sanity checks */
 	BUG_ON(mm->task_size == 0);
+	BUG_ON(mm->context.addr_limit == 0);
 	VM_BUG_ON(radix_enabled());
 
 	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
 	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
 		  addr, len, flags, topdown);
 
-	if (len > mm->task_size)
-		return -ENOMEM;
-	if (len & ((1ul << pshift) - 1))
-		return -EINVAL;
-	if (fixed && (addr & ((1ul << pshift) - 1)))
-		return -EINVAL;
-	if (fixed && addr > (mm->task_size - len))
-		return -ENOMEM;
-
 	/* If hint, make sure it matches our alignment restrictions */
 	if (!fixed && addr) {
-		addr = _ALIGN_UP(addr, 1ul << pshift);
+		addr = _ALIGN_UP(addr, page_size);
 		slice_dbg(" aligned addr=%lx\n", addr);
 		/* Ignore hint if it's too large or overlaps a VMA */
-		if (addr > mm->task_size - len ||
+		if (addr > high_limit - len ||
 		    !slice_area_is_free(mm, addr, len))
			addr = 0;
 	}