Commit 14ed7409 authored by Anton Blanchard, committed by Michael Ellerman

powerpc: Remove some old bootmem related comments

Now bootmem is gone from powerpc we can remove comments mentioning it.
Signed-off-by: Anton Blanchard <anton@samba.org>
Tested-by: Emil Medve <Emilian.Medve@Freescale.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 10239733
@@ -696,10 +696,7 @@ void __init early_init_devtree(void *params)
 	reserve_crashkernel();
 	early_reserve_mem();
-	/*
-	 * Ensure that total memory size is page-aligned, because otherwise
-	 * mark_bootmem() gets upset.
-	 */
+	/* Ensure that total memory size is page-aligned. */
 	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
 	memblock_enforce_memory_limit(limit);
......
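For reference, ALIGN() in the hunk above rounds the chosen limit up to the next page boundary, and `memory_limit ?: memblock_phys_mem_size()` falls back to the total memblock size when no explicit limit was set. A minimal userspace sketch of that rounding (PAGE_SIZE hard-coded to 4096 and the macro name ALIGN_UP chosen purely for illustration):

```c
#include <stdio.h>

/* Sketch of the kernel's ALIGN() idiom: round x up to the next multiple
 * of a power-of-two alignment. PAGE_SIZE is fixed at 4096 here only for
 * the sake of the example. */
#define PAGE_SIZE 4096UL
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long sizes[] = { 4096, 5000, 8191, 8192 };

	for (int i = 0; i < 4; i++)
		printf("%lu -> %lu\n", sizes[i], ALIGN_UP(sizes[i], PAGE_SIZE));
	return 0;
}
```

With PAGE_SIZE = 4096, 5000 rounds up to 8192 while 4096 stays 4096, which is exactly the page-aligned total the comment asks for.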
@@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 }
 /*
- * Call early during boot, before mem init or bootmem, to retrieve the RTAS
- * informations from the device-tree and allocate the RMO buffer for userland
+ * Call early during boot, before mem init, to retrieve the RTAS
+ * information from the device-tree and allocate the RMO buffer for userland
  * accesses.
  */
 void __init rtas_initialize(void)
......
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt);
  * kvm_cma_reserve() - reserve area for kvm hash pagetable
  *
  * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
+ * called by arch specific code once the memblock allocator
  * has been activated and all other subsystems have already allocated/reserved
  * memory.
  */
......
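The comment updated above is about ordering: the reservation has to happen while memblock is still the active early allocator, before the buddy allocator takes over. As a rough, hypothetical sketch of such an early reservation (the address, size, and function name are invented for illustration; kvm_cma_reserve() itself sizes and places its region differently):

```c
#include <linux/init.h>
#include <linux/memblock.h>

/* Hypothetical early reservation done from arch setup code while memblock
 * is active. The base address and size below are illustrative only. */
static void __init example_early_reserve(void)
{
	phys_addr_t base = 0x10000000;	/* illustrative physical address */
	phys_addr_t size = 16UL << 20;	/* 16 MB, illustrative */

	if (memblock_reserve(base, size))
		pr_warn("example reservation of %pa bytes at %pa failed\n",
			&size, &base);
}
```

Anything reserved this way is simply never handed to the buddy allocator later, which is why the comment insists the call happen after memblock is up but before other subsystems have claimed their memory.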
@@ -276,7 +276,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 #ifdef CONFIG_PPC_FSL_BOOK3E
 /* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
  */
 void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 {
@@ -399,7 +399,7 @@ void __init reserve_hugetlb_gpages(void)
 #else /* !PPC_FSL_BOOK3E */
 /* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy or bootmem allocator is setup.
+ * boot before the buddy allocator is setup.
  */
 void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 {
......
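The comment kept by these two hunks describes a small boot-time data structure: gigantic page addresses are recorded in a fixed array before any general-purpose allocator exists. A simplified userspace model of that idea (the array size, names, and addresses below are illustrative, not the kernel's actual definitions):

```c
#include <stdio.h>

/* Simplified model of an early-boot gigantic page list: physical addresses
 * are appended to a static array because no allocator is available yet.
 * MAX_GPAGES and all values here are made up for the example. */
#define MAX_GPAGES 128

static unsigned long long gpage_addrs[MAX_GPAGES];
static unsigned long nr_gpages;

static void model_add_gpage(unsigned long long addr,
			    unsigned long long page_size,
			    unsigned long number_of_pages)
{
	while (number_of_pages > 0 && nr_gpages < MAX_GPAGES) {
		gpage_addrs[nr_gpages++] = addr;
		addr += page_size;
		number_of_pages--;
	}
}

int main(void)
{
	/* Record four 16 GB pages starting at an arbitrary address. */
	model_add_gpage(0x100000000ULL, 16ULL << 30, 4);

	for (unsigned long i = 0; i < nr_gpages; i++)
		printf("gpage %lu at 0x%llx\n", i, gpage_addrs[i]);
	return 0;
}
```

A consumer can later pop addresses off this list to back gigantic hugepages once the normal allocators are running, which is why the list has to be built this early.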
@@ -109,10 +109,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
						  __pgprot(flags)));
 	} else {
 #ifdef CONFIG_PPC_MMU_NOHASH
-		/* Warning ! This will blow up if bootmem is not initialized
-		 * which our ppc64 code is keen to do that, we'll need to
-		 * fix it and/or be more careful
-		 */
 		pgdp = pgd_offset_k(ea);
 #ifdef PUD_TABLE_SIZE
 		if (pgd_none(*pgdp)) {
......