Commit 9e953cda authored by Alexandre Ghiti, committed by Paul Walmsley

riscv: Introduce huge page support for 32/64bit kernel

This patch implements both 4MB huge page support for the 32-bit kernel
and 2MB/1GB huge page support for the 64-bit kernel.
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
parent 3876d4a3
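For context (this is not part of the patch): once the kernel has huge pages reserved, e.g. after `echo 16 > /proc/sys/vm/nr_hugepages`, userspace exercises this support by mapping anonymous memory with mmap(MAP_HUGETLB). A minimal sketch, assuming a 64-bit kernel whose default huge page size is 2MB:

/*
 * Userspace demo (illustrative only): map, touch and unmap one 2MB
 * huge page. Fails with ENOMEM if no huge pages are reserved.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)	/* one 2MB huge page on RV64 */

int main(void)
{
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, LEN);	/* touch the mapping to fault it in */
	munmap(p, LEN);
	return 0;
}

hugetlbfs mounts and SysV shared memory (SHM_HUGETLB) consume the same hstates through other paths.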
arch/riscv/Kconfig:

@@ -50,6 +50,8 @@ config RISCV
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_MMIOWB
	select HAVE_EBPF_JIT if 64BIT
+	select ARCH_HAS_GIGANTIC_PAGE
+	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT

config MMU
	def_bool y
@@ -64,6 +66,12 @@ config PAGE_OFFSET
	default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
	default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB

+config ARCH_WANT_GENERAL_HUGETLB
+	def_bool y
+
+config SYS_SUPPORTS_HUGETLBFS
+	def_bool y

config STACKTRACE_SUPPORT
	def_bool y
arch/riscv/include/asm/hugetlb.h (new file):

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_HUGETLB_H
#define _ASM_RISCV_HUGETLB_H

#include <asm-generic/hugetlb.h>
#include <asm/page.h>

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* _ASM_RISCV_HUGETLB_H */
arch/riscv/include/asm/page.h:

@@ -16,6 +16,16 @@
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))

+#ifdef CONFIG_64BIT
+#define HUGE_MAX_HSTATE 2
+#else
+#define HUGE_MAX_HSTATE 1
+#endif
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
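To make the commit message's numbers concrete: HPAGE_SHIFT is PMD_SHIFT, which is 21 on RV64 (Sv39) and folds to 22 on RV32 (Sv32, where the PMD collapses into the PGD), while gigantic pages sit at PUD_SHIFT = 30 on RV64. A throwaway check of the resulting sizes (not part of the patch):

#include <stdio.h>

int main(void)
{
	printf("RV64 PMD huge page: %lu MB\n", (1UL << 21) >> 20);	/* 2 */
	printf("RV64 PUD huge page: %lu GB\n", (1UL << 30) >> 30);	/* 1 */
	printf("RV32 PMD huge page: %lu MB\n", (1UL << 22) >> 20);	/* 4 */
	return 0;
}

This is also why HUGE_MAX_HSTATE is 2 on 64-bit (a 2MB and a 1GB hstate) but 1 on 32-bit (4MB only).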
arch/riscv/include/asm/pgtable.h:

@@ -113,7 +113,6 @@ static inline void pmd_clear(pmd_t *pmdp)
	set_pmd(pmdp, __pmd(0));
}

-
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
@@ -250,6 +249,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return pte;
+}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -409,7 +413,7 @@ static inline void pgtable_cache_init(void)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)

/*
- * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
+ * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
arch/riscv/mm/Makefile:

@@ -12,3 +12,5 @@ obj-y += ioremap.o
obj-y += cacheflush.o
obj-y += context.o
obj-y += sifive_l2_cache.o
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
arch/riscv/mm/hugetlbpage.c (new file):

// SPDX-License-Identifier: GPL-2.0
#include <linux/hugetlb.h>
#include <linux/err.h>

int pud_huge(pud_t pud)
{
	return pud_present(pud) &&
		(pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

int pmd_huge(pmd_t pmd)
{
	return pmd_present(pmd) &&
		(pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}
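Both helpers rely on how the RISC-V privileged specification encodes page-table entries: a valid entry with R, W and X all clear points to the next level of the table, while any of those bits set makes it a leaf, so a present entry at PMD or PUD level with R|W|X set is by definition a huge mapping. As a standalone sketch (the names below are illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Low PTE bits as defined by the RISC-V privileged spec. */
#define PTE_V (1UL << 0)	/* valid */
#define PTE_R (1UL << 1)	/* readable */
#define PTE_W (1UL << 2)	/* writable */
#define PTE_X (1UL << 3)	/* executable */

/* Hypothetical helper: a valid entry is a leaf iff R, W or X is set. */
static bool pte_is_leaf(uint64_t pte)
{
	return (pte & PTE_V) && (pte & (PTE_R | PTE_W | PTE_X));
}

int main(void)
{
	printf("%d\n", pte_is_leaf(PTE_V));		/* 0: next-level pointer */
	printf("%d\n", pte_is_leaf(PTE_V | PTE_R));	/* 1: leaf mapping */
	return 0;
}

The file continues with the command-line parser: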
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == HPAGE_SIZE) {
		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	} else if (IS_ENABLED(CONFIG_64BIT) && ps == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
		return 0;
	}

	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
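With the parser wired up via __setup(), the sizes are chosen on the kernel command line: hugepagesz=2M selects the PMD-level hstate on RV64 (hugepagesz=4M on RV32) and hugepagesz=1G the PUD-level one. memparse() accepts the usual K/M/G suffixes, so "2M" parses to 2097152, which matches HPAGE_SIZE; the pages themselves are reserved with the companion hugepages= parameter, e.g. hugepagesz=1G hugepages=2.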
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
	if (IS_ENABLED(CONFIG_64BIT) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
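A note on the #ifdef: without CONFIG_CONTIG_ALLOC, the 1GB hstate is only reachable through the hugepagesz= boot parameter, since gigantic pages cannot be assembled from the buddy allocator at runtime. With it, this initcall registers the PUD-size hstate unconditionally on 64-bit, so gigantic pages can also be requested after boot, e.g. via /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages.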