Commit 0a845e0f authored by Peter Xu, committed by Andrew Morton

mm/treewide: replace pud_large() with pud_leaf()

pud_large() is always defined as pud_leaf().  Merge their usages.  Chose
pud_leaf() because pud_leaf() is a global API, while pud_large() is not.

Link: https://lkml.kernel.org/r/20240305043750.93762-9-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2f709f7b
...@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr, ...@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(pte_hw_valid(pud_pte(*pudp))); WARN_ON(pte_hw_valid(pud_pte(*pudp)));
assert_spin_locked(pud_lockptr(mm, pudp)); assert_spin_locked(pud_lockptr(mm, pudp));
WARN_ON(!(pud_large(pud))); WARN_ON(!(pud_leaf(pud)));
#endif #endif
trace_hugepage_set_pud(addr, pud_val(pud)); trace_hugepage_set_pud(addr, pud_val(pud));
return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud)); return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
......
...@@ -366,7 +366,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e ...@@ -366,7 +366,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
} }
pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY); pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
pud_populate(&init_mm, pud, pmd); pud_populate(&init_mm, pud, pmd);
} else if (pud_large(*pud)) { } else if (pud_leaf(*pud)) {
continue; continue;
} }
pgtable_pmd_populate(pud, addr, next, mode); pgtable_pmd_populate(pud, addr, next, mode);
......
...@@ -730,7 +730,7 @@ static inline int pud_bad(pud_t pud) ...@@ -730,7 +730,7 @@ static inline int pud_bad(pud_t pud)
{ {
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK; unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud)) if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
return 1; return 1;
if (type < _REGION_ENTRY_TYPE_R3) if (type < _REGION_ENTRY_TYPE_R3)
return 0; return 0;
...@@ -1400,7 +1400,7 @@ static inline unsigned long pud_deref(pud_t pud) ...@@ -1400,7 +1400,7 @@ static inline unsigned long pud_deref(pud_t pud)
unsigned long origin_mask; unsigned long origin_mask;
origin_mask = _REGION_ENTRY_ORIGIN; origin_mask = _REGION_ENTRY_ORIGIN;
if (pud_large(pud)) if (pud_leaf(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pud_val(pud) & origin_mask); return (unsigned long)__va(pud_val(pud) & origin_mask);
} }
......
...@@ -598,7 +598,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) ...@@ -598,7 +598,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
pud = pud_offset(p4d, vmaddr); pud = pud_offset(p4d, vmaddr);
VM_BUG_ON(pud_none(*pud)); VM_BUG_ON(pud_none(*pud));
/* large puds cannot yet be handled */ /* large puds cannot yet be handled */
if (pud_large(*pud)) if (pud_leaf(*pud))
return -EFAULT; return -EFAULT;
pmd = pmd_offset(pud, vmaddr); pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd)); VM_BUG_ON(pmd_none(*pmd));
......
...@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, ...@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (p4d_present(*p4dp)) { if (p4d_present(*p4dp)) {
pudp = pud_offset(p4dp, addr); pudp = pud_offset(p4dp, addr);
if (pud_present(*pudp)) { if (pud_present(*pudp)) {
if (pud_large(*pudp)) if (pud_leaf(*pudp))
return (pte_t *) pudp; return (pte_t *) pudp;
pmdp = pmd_offset(pudp, addr); pmdp = pmd_offset(pudp, addr);
} }
...@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd) ...@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd)
int pud_huge(pud_t pud) int pud_huge(pud_t pud)
{ {
return pud_large(pud); return pud_leaf(pud);
} }
bool __init arch_hugetlb_valid_size(unsigned long size) bool __init arch_hugetlb_valid_size(unsigned long size)
......
...@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end, ...@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
if (pud_none(*pudp)) if (pud_none(*pudp))
return -EINVAL; return -EINVAL;
next = pud_addr_end(addr, end); next = pud_addr_end(addr, end);
if (pud_large(*pudp)) { if (pud_leaf(*pudp)) {
need_split = !!(flags & SET_MEMORY_4K); need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PUD_MASK); need_split |= !!(addr & ~PUD_MASK);
need_split |= !!(addr + PUD_SIZE > next); need_split |= !!(addr + PUD_SIZE > next);
......
...@@ -470,7 +470,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) ...@@ -470,7 +470,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
return -ENOENT; return -ENOENT;
/* Large PUDs are not supported yet. */ /* Large PUDs are not supported yet. */
if (pud_large(*pud)) if (pud_leaf(*pud))
return -EFAULT; return -EFAULT;
*pmdp = pmd_offset(pud, addr); *pmdp = pmd_offset(pud, addr);
......
...@@ -329,7 +329,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end, ...@@ -329,7 +329,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
if (!add) { if (!add) {
if (pud_none(*pud)) if (pud_none(*pud))
continue; continue;
if (pud_large(*pud)) { if (pud_leaf(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) && if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) { IS_ALIGNED(next, PUD_SIZE)) {
pud_clear(pud); pud_clear(pud);
...@@ -350,7 +350,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end, ...@@ -350,7 +350,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
if (!pmd) if (!pmd)
goto out; goto out;
pud_populate(&init_mm, pud, pmd); pud_populate(&init_mm, pud, pmd);
} else if (pud_large(*pud)) { } else if (pud_leaf(*pud)) {
continue; continue;
} }
ret = modify_pmd_table(pud, addr, next, add, direct, altmap); ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
...@@ -599,7 +599,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc) ...@@ -599,7 +599,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
if (!pmd) if (!pmd)
goto out; goto out;
pud_populate(&init_mm, pud, pmd); pud_populate(&init_mm, pud, pmd);
} else if (WARN_ON_ONCE(pud_large(*pud))) { } else if (WARN_ON_ONCE(pud_leaf(*pud))) {
goto out; goto out;
} }
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
......
...@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr) ...@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr)
if (pud_none(*pud)) if (pud_none(*pud))
return false; return false;
if (pud_large(*pud)) if (pud_leaf(*pud))
return pfn_valid(pud_pfn(*pud)); return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
......
...@@ -3126,7 +3126,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, ...@@ -3126,7 +3126,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
if (pud_none(pud) || !pud_present(pud)) if (pud_none(pud) || !pud_present(pud))
goto out; goto out;
if (pud_large(pud)) { if (pud_leaf(pud)) {
level = PG_LEVEL_1G; level = PG_LEVEL_1G;
goto out; goto out;
} }
......
...@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address) ...@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address)
goto bad; goto bad;
pr_cont("PUD %lx ", pud_val(*pud)); pr_cont("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud)) if (!pud_present(*pud) || pud_leaf(*pud))
goto out; goto out;
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
...@@ -1046,7 +1046,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address) ...@@ -1046,7 +1046,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
if (!pud_present(*pud)) if (!pud_present(*pud))
return 0; return 0;
if (pud_large(*pud)) if (pud_leaf(*pud))
return spurious_kernel_fault_check(error_code, (pte_t *) pud); return spurious_kernel_fault_check(error_code, (pte_t *) pud);
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
......
...@@ -33,7 +33,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, ...@@ -33,7 +33,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
next = end; next = end;
/* if this is already a gbpage, this portion is already mapped */ /* if this is already a gbpage, this portion is already mapped */
if (pud_large(*pud)) if (pud_leaf(*pud))
continue; continue;
/* Is using a gbpage allowed? */ /* Is using a gbpage allowed? */
......
...@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, ...@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
} }
if (!pud_none(*pud)) { if (!pud_none(*pud)) {
if (!pud_large(*pud)) { if (!pud_leaf(*pud)) {
pmd = pmd_offset(pud, 0); pmd = pmd_offset(pud, 0);
paddr_last = phys_pmd_init(pmd, paddr, paddr_last = phys_pmd_init(pmd, paddr,
paddr_end, paddr_end,
...@@ -1163,7 +1163,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, ...@@ -1163,7 +1163,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
if (!pud_present(*pud)) if (!pud_present(*pud))
continue; continue;
if (pud_large(*pud) && if (pud_leaf(*pud) &&
IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) { IS_ALIGNED(next, PUD_SIZE)) {
spin_lock(&init_mm.page_table_lock); spin_lock(&init_mm.page_table_lock);
......
...@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, ...@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
pud = pud_offset(p4d, addr); pud = pud_offset(p4d, addr);
do { do {
next = pud_addr_end(addr, end); next = pud_addr_end(addr, end);
if (!pud_large(*pud)) if (!pud_leaf(*pud))
kasan_populate_pud(pud, addr, next, nid); kasan_populate_pud(pud, addr, next, nid);
} while (pud++, addr = next, addr != end); } while (pud++, addr = next, addr != end);
} }
......
...@@ -145,7 +145,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) ...@@ -145,7 +145,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
set_pud(pud, __pud(PUD_FLAGS | __pa(pmd))); set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
} }
if (pud_large(*pud)) if (pud_leaf(*pud))
return NULL; return NULL;
return pud; return pud;
......
...@@ -684,7 +684,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, ...@@ -684,7 +684,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
return NULL; return NULL;
*level = PG_LEVEL_1G; *level = PG_LEVEL_1G;
if (pud_large(*pud) || !pud_present(*pud)) if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud; return (pte_t *)pud;
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
...@@ -743,7 +743,7 @@ pmd_t *lookup_pmd_address(unsigned long address) ...@@ -743,7 +743,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
return NULL; return NULL;
pud = pud_offset(p4d, address); pud = pud_offset(p4d, address);
if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud)) if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
return NULL; return NULL;
return pmd_offset(pud, address); return pmd_offset(pud, address);
...@@ -1274,7 +1274,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end) ...@@ -1274,7 +1274,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
*/ */
while (end - start >= PUD_SIZE) { while (end - start >= PUD_SIZE) {
if (pud_large(*pud)) if (pud_leaf(*pud))
pud_clear(pud); pud_clear(pud);
else else
unmap_pmd_range(pud, start, start + PUD_SIZE); unmap_pmd_range(pud, start, start + PUD_SIZE);
......
...@@ -777,7 +777,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) ...@@ -777,7 +777,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
*/ */
int pud_clear_huge(pud_t *pud) int pud_clear_huge(pud_t *pud)
{ {
if (pud_large(*pud)) { if (pud_leaf(*pud)) {
pud_clear(pud); pud_clear(pud);
return 1; return 1;
} }
......
...@@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) ...@@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
pud = pud_offset(p4d, address); pud = pud_offset(p4d, address);
/* The user page tables do not use large mappings: */ /* The user page tables do not use large mappings: */
if (pud_large(*pud)) { if (pud_leaf(*pud)) {
WARN_ON(1); WARN_ON(1);
return NULL; return NULL;
} }
......
...@@ -170,7 +170,7 @@ int relocate_restore_code(void) ...@@ -170,7 +170,7 @@ int relocate_restore_code(void)
goto out; goto out;
} }
pud = pud_offset(p4d, relocated_restore_code); pud = pud_offset(p4d, relocated_restore_code);
if (pud_large(*pud)) { if (pud_leaf(*pud)) {
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX)); set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
goto out; goto out;
} }
......
...@@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin) ...@@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
pmd_t *pmd_tbl; pmd_t *pmd_tbl;
int i; int i;
if (pud_large(*pud)) { if (pud_leaf(*pud)) {
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK; pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PUD_SIZE); xen_free_ro_pages(pa, PUD_SIZE);
return; return;
...@@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) ...@@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
if (!pud_present(pud)) if (!pud_present(pud))
return 0; return 0;
pa = pud_val(pud) & PTE_PFN_MASK; pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud)) if (pud_leaf(pud))
return pa + (vaddr & ~PUD_MASK); return pa + (vaddr & ~PUD_MASK);
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) * pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment