Commit c32c2cb2 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "I thought we were done for 4.5, but then the 64k-page chaps came
  crawling out of the woodwork.  *sigh*

   The vmemmap fix I sent for -rc7 caused a regression with 64k pages and
   sparsemem, and at some point during the release cycle the new hugetlb
   code using contiguous ptes started failing the libhugetlbfs tests with
   64k pages enabled.

  So here are a couple of patches that fix the vmemmap alignment and
  disable the new hugetlb page sizes whilst a proper fix is being
  developed:

   - Temporarily disable huge pages built using contiguous ptes

   - Ensure vmemmap region is sufficiently aligned for sparsemem
     sections"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hugetlb: partial revert of 66b3923a
  arm64: account for sparsemem section alignment when choosing vmemmap offset
parents 2da33f9f ff792584
arch/arm64/include/asm/pgtable.h
@@ -40,7 +40,7 @@
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *	fixed mappings and modules
  */
-#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START	(VA_START)
@@ -52,7 +52,8 @@
 #define VMALLOC_END	(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START	(VMALLOC_END + SZ_64K)
-#define vmemmap		((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define vmemmap		((struct page *)VMEMMAP_START - \
+			 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
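The second hunk stops subtracting the raw start-of-RAM pfn from VMEMMAP_START
and subtracts a section-aligned pfn instead, so the run of struct pages backing
each sparsemem section stays naturally aligned within the vmemmap region; the
VMEMMAP_SIZE hunk supplies the extra headroom that rounding down can require.
A minimal userspace sketch of that arithmetic (not kernel code: PAGE_SHIFT=16
for 64k pages, SECTION_SIZE_BITS=30, and the local SECTION_ALIGN_DOWN() are
assumed stand-ins for the kernel's definitions):

#include <stdio.h>

#define PAGE_SHIFT		16	/* assumed: 64k pages */
#define SECTION_SIZE_BITS	30	/* assumed: arm64 sparsemem section size */
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
	/* hypothetical RAM base that is not section-aligned */
	unsigned long memstart_addr = 0x90000000UL;
	unsigned long pfn = memstart_addr >> PAGE_SHIFT;

	/* old offset: a section's struct pages straddle section boundaries */
	printf("raw pfn offset     mod PAGES_PER_SECTION = %lu\n",
	       pfn % PAGES_PER_SECTION);

	/* new offset: rounded down, every section's struct pages line up */
	printf("aligned pfn offset mod PAGES_PER_SECTION = %lu\n",
	       SECTION_ALIGN_DOWN(pfn) % PAGES_PER_SECTION);
	return 0;
}

Compiled with any C compiler, this prints a non-zero remainder for the raw
offset and zero for the section-aligned one.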
arch/arm64/mm/hugetlbpage.c
@@ -306,10 +306,6 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
-		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
 		return 0;
@@ -317,13 +313,3 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PMD_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
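For reference, the hugepage sizes this revert disables: a contiguous-PTE
hugepage spans CONT_PTES base pages hinted to share one TLB entry, and a
contiguous-PMD hugepage spans CONT_PMDS block mappings. A standalone sketch
(not kernel code; the CONT_PTES/CONT_PMDS counts of 16 for the 4k granule and
32 for the 64k granule are assumptions taken from the v4.5-era arm64 headers):

#include <stdio.h>

/* print the contiguous-hint hugepage sizes for one translation granule */
static void cont_sizes(const char *granule, unsigned long page_kb,
		       unsigned long pmd_kb, unsigned long cont_ptes,
		       unsigned long cont_pmds)
{
	printf("%3s granule: cont-PTE hugepage = %8lu KB, "
	       "cont-PMD hugepage = %lu KB\n",
	       granule, cont_ptes * page_kb, cont_pmds * pmd_kb);
}

int main(void)
{
	cont_sizes("4k", 4, 2UL << 10, 16, 16);		/* assumed CONT_* = 16 */
	cont_sizes("64k", 64, 512UL << 10, 32, 32);	/* assumed CONT_* = 32 */
	return 0;
}

With a 64k granule this works out to the 2MB and 16GB sizes implicated in the
libhugetlbfs failures; ordinary PMD-sized hugepages (512MB with a 64k granule)
are unaffected by the revert.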