Commit 368a0590 authored by Aneesh Kumar K.V, committed by Andrew Morton

powerpc/book3s64/vmemmap: switch radix to use a different vmemmap handling function

This is in preparation for updating radix to implement vmemmap optimization
for devdax.  Below are the rules w.r.t. the radix vmemmap mapping (a minimal
sketch of this fallback order follows the list):

1. First try to map things using a PMD (2M) mapping
2. With an altmap, if the altmap cross-boundary check returns true, fall
   back to PAGE_SIZE
3. If we can't allocate PMD_SIZE backing memory for the vmemmap, fall back
   to PAGE_SIZE
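
For illustration only, here is a minimal user-space C sketch of that
fallback order; pick_map_size() and the stub helpers are hypothetical
names, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>

#define MAP_PMD_SIZE	(2UL << 20)	/* 2M */
#define MAP_PAGE_SIZE	(64UL << 10)	/* 64K base page, as on ppc64 */

/* Hypothetical stand-ins for the real checks. */
static bool altmap_crosses_boundary(void) { return false; }	/* rule 2 input */
static bool pmd_backing_available(void) { return true; }	/* rule 3 input */

static unsigned long pick_map_size(bool have_altmap)
{
	if (have_altmap && altmap_crosses_boundary())
		return MAP_PAGE_SIZE;		/* rule 2: fall back */
	if (!pmd_backing_available())
		return MAP_PAGE_SIZE;		/* rule 3: fall back */
	return MAP_PMD_SIZE;			/* rule 1: prefer 2M */
}

int main(void)
{
	printf("chosen mapping size: %lu\n", pick_map_size(true));
	return 0;
}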

On removing vmemmap mapping, check if every subsection that is using the
vmemmap area is invalid.  If found to be invalid, that implies we can
safely free the vmemmap area.  We don't use the PAGE_UNUSED pattern used
by x86 because with 64K page size, we need to do the above check even at
the PAGE_SIZE granularity.
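
A similar user-space sketch of that removal-time check (all names below
are made up for illustration; the real check is part of the radix
vmemmap free path in the collapsed radix_pgtable.c diff):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * One flag per subsection backed by a given vmemmap chunk; the chunk
 * may be freed only when no subsection using it is still valid.
 */
static bool can_free_vmemmap_chunk(const bool *subsection_valid, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (subsection_valid[i])
			return false;	/* still in use, keep the chunk */
	return true;			/* every user gone, safe to free */
}

int main(void)
{
	bool valid[] = { false, false, false };

	printf("can free: %d\n", can_free_vmemmap_chunk(valid, 3));
	return 0;
}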

[aneesh.kumar@linux.ibm.com: fix section mismatch warning]
  Link: https://lkml.kernel.org/r/87h6pqvu5g.fsf@linux.ibm.com
[aneesh.kumar@linux.ibm.com: fix kernel build error]
  Link: https://lkml.kernel.org/r/877cqkwd20.fsf@linux.ibm.com
Link: https://lkml.kernel.org/r/20230724190759.483013-11-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 27af67f3
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -331,6 +331,8 @@ extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
 					     unsigned long phys);
 int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end,
 				      int node, struct vmem_altmap *altmap);
+void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
+			       struct vmem_altmap *altmap);
 extern void radix__vmemmap_remove_mapping(unsigned long start,
 					  unsigned long page_size);
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -157,6 +157,12 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 	return (pgtable_t)pmd_page_vaddr(pmd);
 }
 
+#ifdef CONFIG_PPC64
+int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
+bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+			   unsigned long page_size);
+#endif /* CONFIG_PPC64 */
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PGTABLE_H */
[diff collapsed: arch/powerpc/mm/book3s64/radix_pgtable.c]
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -92,7 +92,7 @@ static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
  * a page table lookup here because with the hash translation we don't keep
  * vmemmap details in linux page table.
  */
-static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
+int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
 {
 	struct page *start;
 	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;
@@ -183,8 +183,8 @@ static __meminit int vmemmap_list_populate(unsigned long phys,
 	return 0;
 }
 
-static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
-				  unsigned long page_size)
+bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+			   unsigned long page_size)
 {
 	unsigned long nr_pfn = page_size / sizeof(struct page);
 	unsigned long start_pfn = page_to_pfn((struct page *)start);
@@ -204,6 +204,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	bool altmap_alloc;
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (radix_enabled())
+		return radix__vmemmap_populate(start, end, node, altmap);
+#endif
+
 	/* Align to the page size of the linear mapping. */
 	start = ALIGN_DOWN(start, page_size);
@@ -303,8 +308,8 @@ static unsigned long vmemmap_list_free(unsigned long start)
 	return vmem_back->phys;
 }
 
-void __ref vmemmap_free(unsigned long start, unsigned long end,
-			struct vmem_altmap *altmap)
+static void __ref __vmemmap_free(unsigned long start, unsigned long end,
+				 struct vmem_altmap *altmap)
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 	unsigned long page_order = get_order(page_size);
@@ -362,6 +367,17 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 			vmemmap_remove_mapping(start, page_size);
 	}
 }
+
+void __ref vmemmap_free(unsigned long start, unsigned long end,
+			struct vmem_altmap *altmap)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (radix_enabled())
+		return radix__vmemmap_free(start, end, altmap);
+#endif
+	return __vmemmap_free(start, end, altmap);
+}
 #endif
 
 void register_page_bootmem_memmap(unsigned long section_nr,
 				  struct page *start_page, unsigned long size)