Commit e9acfc13 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: allow MAP_FIXED hugepage mappings

From: David Gibson <david@gibson.dropbear.id.au>

On PowerPC64 the "low" hugepage range (at 2-3G, for use by 32-bit processes)
needs to be activated before it can be used.  hugetlb_get_unmapped_area()
activates the range automatically for hugepage mappings in 32-bit processes
which are not MAP_FIXED.  However, a MAP_FIXED mmap(), even at a suitable
address, will fail if the region has not already been activated, because
there is no suitable callback from the generic MAP_FIXED code path into the
arch code.

This patch corrects this problem and allows PPC64 to do MAP_FIXED hugepage
mappings in the low hugepage range.
Parent: ccfcbaed
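For context, the failing case this patch addresses looks roughly like the
sketch below: a 32-bit process mmap()s a hugetlbfs file MAP_FIXED at an
address inside the 2-3G window.  The mount point, file name, hugepage size
and fixed address are illustrative assumptions, not taken from the patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(16UL * 1024 * 1024)	/* assumed 16M hugepages */
#define LOW_HPAGE_ADDR	((void *)0x90000000UL)	/* inside the 2-3G window */

int main(void)
{
	/* a hugetlbfs mount at /mnt/huge is an assumption for this sketch */
	int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Before this patch, the MAP_FIXED mapping below failed on ppc64
	 * unless a non-fixed hugepage mmap() had already activated the
	 * low range; with prepare_hugepage_range() it is opened on demand.
	 */
	void *p = mmap(LOW_HPAGE_ADDR, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_FIXED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("hugepage mapping at %p\n", p);
	return 0;
}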
arch/ppc64/mm/hugetlbpage.c
@@ -295,6 +295,16 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
 	return 0;
 }
 
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (is_hugepage_high_range(addr, len))
+		return 0;
+	else if (is_hugepage_low_range(addr, len))
+		return open_32bit_htlbpage_range(current->mm);
+
+	return -EINVAL;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
 {
include/asm-ppc64/page.h
@@ -38,10 +38,17 @@
 #define TASK_HPAGE_END_32	(0xc0000000UL)
 
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+
+#define is_hugepage_low_range(addr, len) \
+	(((addr) > (TASK_HPAGE_BASE_32-(len))) && ((addr) < TASK_HPAGE_END_32))
+#define is_hugepage_high_range(addr, len) \
+	(((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))
+
 #define is_hugepage_only_range(addr, len) \
-	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
-	  (current->mm->context.low_hpages && \
-	   (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+	(is_hugepage_high_range((addr), (len)) || \
+	 (current->mm->context.low_hpages \
+	  && is_hugepage_low_range((addr), (len))))
 #define hugetlb_free_pgtables	free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
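The new range macros encode an interval-overlap test: (addr) > (BASE-(len))
&& (addr) < END is true exactly when [addr, addr+len) intersects [BASE, END).
A standalone sketch of the check, with the 2G base assumed from the 2-3G
window described in the changelog:

#include <assert.h>

#define TASK_HPAGE_BASE_32	(0x80000000UL)	/* 2G, assumed */
#define TASK_HPAGE_END_32	(0xc0000000UL)	/* 3G, from the hunk above */

/* True when [addr, addr+len) overlaps [base, end) */
static int overlaps(unsigned long addr, unsigned long len,
		    unsigned long base, unsigned long end)
{
	return addr > base - len && addr < end;
}

int main(void)
{
	/* A 16M mapping inside the window overlaps it. */
	assert(overlaps(0x90000000UL, 0x1000000UL,
			TASK_HPAGE_BASE_32, TASK_HPAGE_END_32));
	/* A mapping ending exactly at the base does not. */
	assert(!overlaps(TASK_HPAGE_BASE_32 - 0x1000000UL, 0x1000000UL,
			 TASK_HPAGE_BASE_32, TASK_HPAGE_END_32));
	return 0;
}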
include/linux/hugetlb.h
@@ -42,6 +42,13 @@ mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
 #define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
 #endif
 
+#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define prepare_hugepage_range(addr, len)	\
+	is_aligned_hugepage_range(addr, len)
+#else
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
+#endif
+
 #else /* !CONFIG_HUGETLB_PAGE */
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -62,6 +69,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #define mark_mm_hugetlb(mm, vma)		do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)	0
 #define is_aligned_hugepage_range(addr, len)	0
+#define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(addr, len)	0
 #define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
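With these definitions prepare_hugepage_range() has three behaviours: an
arch that defines ARCH_HAS_PREPARE_HUGEPAGE_RANGE (here ppc64) supplies a
real function that can activate the low range; other CONFIG_HUGETLB_PAGE
arches fall back to the old is_aligned_hugepage_range() check, so their
behaviour is unchanged; and with hugepage support compiled out the stub
simply returns -EINVAL.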
mm/mmap.c
@@ -807,9 +807,10 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		return -EINVAL;
 	if (file && is_file_hugepages(file)) {
 		/*
-		 * Make sure that addr and length are properly aligned.
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
 		 */
-		ret = is_aligned_hugepage_range(addr, len);
+		ret = prepare_hugepage_range(addr, len);
 	} else {
 		/*
 		 * Ensure that a normal request is not falling in a
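The net effect is that a MAP_FIXED hugetlbfs mmap() now reaches the arch
hook from get_unmapped_area(), so ppc64 can open the 32-bit window on demand
via open_32bit_htlbpage_range() instead of rejecting an otherwise valid
fixed mapping.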