Commit d6eacedd authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/book3s: Use config independent helpers for page table walk

Even when we have HugeTLB and THP disabled, the kernel linear map can still be
mapped with hugepages. This is only an issue with radix translation, because the
hash MMU doesn't map the kernel linear range in the Linux page table, and other
kernel map areas are not mapped using hugepages.

Add config-independent helpers, and add a WARN_ON() where we don't expect things
to be mapped via hugepages.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 259a948c
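
Editorial note (not part of the commit): the pattern these helpers enable is a
walk that can stop at any level whose entry is a leaf, without consulting
CONFIG_HUGETLB_PAGE or CONFIG_TRANSPARENT_HUGEPAGE. A minimal sketch of such a
walk using this era's powerpc page table APIs; lookup_kernel_leaf() is a
hypothetical illustration, not code from this patch:

    /* Hypothetical sketch: config-independent kernel-address lookup. */
    static pte_t *lookup_kernel_leaf(unsigned long addr)
    {
            pgd_t *pgdp = pgd_offset_k(addr);
            pud_t *pudp;
            pmd_t *pmdp;

            if (pgd_none(*pgdp))
                    return NULL;
            if (pgd_is_leaf(*pgdp))         /* leaf even in !HUGETLB/!THP builds */
                    return (pte_t *)pgdp;

            pudp = pud_offset(pgdp, addr);
            if (pud_none(*pudp))
                    return NULL;
            if (pud_is_leaf(*pudp))         /* e.g. 1G radix linear mapping */
                    return (pte_t *)pudp;

            pmdp = pmd_offset(pudp, addr);
            if (pmd_none(*pmdp))
                    return NULL;
            if (pmd_is_leaf(*pmdp))         /* e.g. 2M radix linear mapping */
                    return (pte_t *)pmdp;

            return pte_offset_kernel(pmdp, addr);
    }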
@@ -1350,5 +1350,26 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
         return false;
 }
 
+/*
+ * Like pmd_huge() and pmd_large(), but works regardless of config options
+ */
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+        return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+        return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
+#define pgd_is_leaf pgd_is_leaf
+static inline bool pgd_is_leaf(pgd_t pgd)
+{
+        return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
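
Editorial note (not part of the commit): on book3s64 every leaf entry carries
_PAGE_PTE, at every level, so a single raw-bit test suffices. Testing pmd_raw()
against cpu_to_be64(_PAGE_PTE) swaps the constant at compile time instead of
byte-swapping each big-endian entry at run time; the form below is a
functionally equivalent sketch (it is also the form the KVM-private helper
removed later in this patch used):

    /* Equivalent, but converts the entry rather than the constant. */
    static inline bool pmd_is_leaf_alt(pmd_t pmd)
    {
            return !!(pmd_val(pmd) & _PAGE_PTE);
    }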
@@ -140,6 +140,30 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
 }
 #endif
 
+#ifndef pmd_is_leaf
+#define pmd_is_leaf pmd_is_leaf
+static inline bool pmd_is_leaf(pmd_t pmd)
+{
+        return false;
+}
+#endif
+
+#ifndef pud_is_leaf
+#define pud_is_leaf pud_is_leaf
+static inline bool pud_is_leaf(pud_t pud)
+{
+        return false;
+}
+#endif
+
+#ifndef pgd_is_leaf
+#define pgd_is_leaf pgd_is_leaf
+static inline bool pgd_is_leaf(pgd_t pgd)
+{
+        return false;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PGTABLE_H */
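
Editorial note (not part of the commit): the "#define pmd_is_leaf pmd_is_leaf"
self-definition is what makes each override visible to the preprocessor: where
book3s64 defines the macro, the #ifndef fallback is skipped; everywhere else
the stub constant-folds to false, so the compiler can discard leaf-handling
branches entirely. The pattern in isolation, with hypothetical names:

    /* arch header: real implementation, advertised via self-define */
    #define have_leaf have_leaf
    static inline bool have_leaf(void) { return true; }

    /* generic header: compiled only when no arch override exists */
    #ifndef have_leaf
    #define have_leaf have_leaf
    static inline bool have_leaf(void) { return false; }
    #endif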
@@ -10,8 +10,20 @@ extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
                                     bool *is_thp, unsigned *hshift)
 {
+        pte_t *pte;
+
         VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
-        return __find_linux_pte(pgdir, ea, is_thp, hshift);
+        pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) && \
+        !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+        /*
+         * We should not find huge page if these configs are not enabled.
+         */
+        if (hshift)
+                WARN_ON(*hshift);
+#endif
+        return pte;
 }
 
 static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
@@ -26,10 +38,22 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
 static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
                                          bool *is_thp, unsigned *hshift)
 {
+        pte_t *pte;
+
         VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
         VM_WARN(pgdir != current->mm->pgd,
                 "%s lock less page table lookup called on wrong mm\n", __func__);
-        return __find_linux_pte(pgdir, ea, is_thp, hshift);
+        pte = __find_linux_pte(pgdir, ea, is_thp, hshift);
+
+#if defined(CONFIG_DEBUG_VM) && \
+        !(defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE))
+        /*
+         * We should not find huge page if these configs are not enabled.
+         */
+        if (hshift)
+                WARN_ON(*hshift);
+#endif
+        return pte;
 }
 
 #endif /* _ASM_POWERPC_PTE_WALK_H */
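
Editorial note (not part of the commit): both wrappers assert that interrupts
are off because __find_linux_pte() walks the tables locklessly; on powerpc,
keeping IRQs disabled holds off the batched page-table freeing, so the returned
pointer stays valid for the duration. A hedged caller sketch, assuming kernel
context with 'ea' in scope:

    unsigned long flags;
    unsigned int shift = 0;
    pte_t *ptep;
    pte_t pte;

    local_irq_save(flags);                  /* stabilise the lockless walk */
    ptep = find_current_mm_pte(current->mm->pgd, ea, NULL, &shift);
    if (ptep)
            pte = READ_ONCE(*ptep);         /* shift != 0 means a huge mapping */
    local_irq_restore(flags);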
@@ -363,12 +363,6 @@ static void kvmppc_pte_free(pte_t *ptep)
         kmem_cache_free(kvm_pte_cache, ptep);
 }
 
-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
-        return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
 static pmd_t *kvmppc_pmd_alloc(void)
 {
         return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
@@ -489,7 +483,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
         for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
                 if (!pud_present(*p))
                         continue;
-                if (pud_huge(*p)) {
+                if (pud_is_leaf(*p)) {
                         pud_clear(p);
                 } else {
                         pmd_t *pmd;
@@ -588,7 +582,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
                 new_pud = pud_alloc_one(kvm->mm, gpa);
 
         pmd = NULL;
-        if (pud && pud_present(*pud) && !pud_huge(*pud))
+        if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
                 pmd = pmd_offset(pud, gpa);
         else if (level <= 1)
                 new_pmd = kvmppc_pmd_alloc();
@@ -611,7 +605,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
                 new_pud = NULL;
         }
         pud = pud_offset(pgd, gpa);
-        if (pud_huge(*pud)) {
+        if (pud_is_leaf(*pud)) {
                 unsigned long hgpa = gpa & PUD_MASK;
 
                 /* Check if we raced and someone else has set the same thing */
@@ -203,14 +203,14 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
                 pudp = pud_alloc(&init_mm, pgdp, idx);
                 if (!pudp)
                         continue;
-                if (pud_huge(*pudp)) {
+                if (pud_is_leaf(*pudp)) {
                         ptep = (pte_t *)pudp;
                         goto update_the_pte;
                 }
                 pmdp = pmd_alloc(&init_mm, pudp, idx);
                 if (!pmdp)
                         continue;
-                if (pmd_huge(*pmdp)) {
+                if (pmd_is_leaf(*pmdp)) {
                         ptep = pmdp_ptep(pmdp);
                         goto update_the_pte;
                 }
@@ -835,7 +835,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                 if (!pmd_present(*pmd))
                         continue;
 
-                if (pmd_huge(*pmd)) {
+                if (pmd_is_leaf(*pmd)) {
                         split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
                         continue;
                 }
@@ -860,7 +860,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
                 if (!pud_present(*pud))
                         continue;
 
-                if (pud_huge(*pud)) {
+                if (pud_is_leaf(*pud)) {
                         split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
                         continue;
                 }
@@ -886,7 +886,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
                 if (!pgd_present(*pgd))
                         continue;
 
-                if (pgd_huge(*pgd)) {
+                if (pgd_is_leaf(*pgd)) {
                         split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
                         continue;
                 }
@@ -340,10 +340,11 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
         if (pgd_none(pgd))
                 return NULL;
 
-        if (pgd_huge(pgd)) {
+        if (pgd_is_leaf(pgd)) {
                 ret_pte = (pte_t *)pgdp;
                 goto out;
         }
+
         if (is_hugepd(__hugepd(pgd_val(pgd)))) {
                 hpdp = (hugepd_t *)&pgd;
                 goto out_huge;
@@ -361,14 +362,16 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
         if (pud_none(pud))
                 return NULL;
 
-        if (pud_huge(pud)) {
+        if (pud_is_leaf(pud)) {
                 ret_pte = (pte_t *)pudp;
                 goto out;
         }
+
         if (is_hugepd(__hugepd(pud_val(pud)))) {
                 hpdp = (hugepd_t *)&pud;
                 goto out_huge;
         }
+
         pdshift = PMD_SHIFT;
         pmdp = pmd_offset(&pud, ea);
         pmd = READ_ONCE(*pmdp);
@@ -397,15 +400,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
                 ret_pte = (pte_t *)pmdp;
                 goto out;
         }
-        /*
-         * pmd_large check below will handle the swap pmd pte
-         * we need to do both the check because they are config
-         * dependent.
-         */
-        if (pmd_huge(pmd) || pmd_large(pmd)) {
+
+        if (pmd_is_leaf(pmd)) {
                 ret_pte = (pte_t *)pmdp;
                 goto out;
         }
+
         if (is_hugepd(__hugepd(pmd_val(pmd)))) {
                 hpdp = (hugepd_t *)&pmd;
                 goto out_huge;
@@ -309,16 +309,20 @@ EXPORT_SYMBOL(__iounmap_at);
 /* 4 level page table */
 struct page *pgd_page(pgd_t pgd)
 {
-        if (pgd_huge(pgd))
+        if (pgd_is_leaf(pgd)) {
+                VM_WARN_ON(!pgd_huge(pgd));
                 return pte_page(pgd_pte(pgd));
+        }
         return virt_to_page(pgd_page_vaddr(pgd));
 }
 #endif
 
 struct page *pud_page(pud_t pud)
 {
-        if (pud_huge(pud))
+        if (pud_is_leaf(pud)) {
+                VM_WARN_ON(!pud_huge(pud));
                 return pte_page(pud_pte(pud));
+        }
         return virt_to_page(pud_page_vaddr(pud));
 }
@@ -328,8 +332,10 @@ struct page *pud_page(pud_t pud)
  */
 struct page *pmd_page(pmd_t pmd)
 {
-        if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+        if (pmd_is_leaf(pmd)) {
+                VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd)));
                 return pte_page(pmd_pte(pmd));
+        }
         return virt_to_page(pmd_page_vaddr(pmd));
 }
@@ -277,7 +277,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
         for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                 addr = start + i * PMD_SIZE;
-                if (!pmd_none(*pmd) && !pmd_huge(*pmd))
+                if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
                         /* pmd exists */
                         walk_pte(st, pmd, addr);
                 else
@@ -293,7 +293,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
         for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
                 addr = start + i * PUD_SIZE;
-                if (!pud_none(*pud) && !pud_huge(*pud))
+                if (!pud_none(*pud) && !pud_is_leaf(*pud))
                         /* pud exists */
                         walk_pmd(st, pud, addr);
                 else
@@ -314,7 +314,7 @@ static void walk_pagetables(struct pg_state *st)
          * the hash pagetable.
          */
         for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
-                if (!pgd_none(*pgd) && !pgd_huge(*pgd))
+                if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
                         /* pgd exists */
                         walk_pud(st, pgd, addr);
                 else
@@ -3098,7 +3098,7 @@ static void show_pte(unsigned long addr)
         printf("pgd @ 0x%px\n", pgdir);
 
-        if (pgd_huge(*pgdp)) {
+        if (pgd_is_leaf(*pgdp)) {
                 format_pte(pgdp, pgd_val(*pgdp));
                 return;
         }
@@ -3111,7 +3111,7 @@ static void show_pte(unsigned long addr)
                 return;
         }
 
-        if (pud_huge(*pudp)) {
+        if (pud_is_leaf(*pudp)) {
                 format_pte(pudp, pud_val(*pudp));
                 return;
         }
@@ -3125,7 +3125,7 @@ static void show_pte(unsigned long addr)
                 return;
         }
 
-        if (pmd_huge(*pmdp)) {
+        if (pmd_is_leaf(*pmdp)) {
                 format_pte(pmdp, pmd_val(*pmdp));
                 return;
         }