Commit 97ad1087 authored by Mike Rapoport, committed by Linus Torvalds

memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants

Drop BOOTMEM_ALLOC_ACCESSIBLE and BOOTMEM_ALLOC_ANYWHERE in favor of
identical MEMBLOCK definitions.

Link: http://lkml.kernel.org/r/1536927045-23536-29-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bda49a81
...@@ -453,7 +453,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) ...@@ -453,7 +453,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE, ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
__pa(MAX_DMA_ADDRESS), __pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE, MEMBLOCK_ALLOC_ACCESSIBLE,
bestnode); bestnode);
return ptr; return ptr;
......
...@@ -764,7 +764,7 @@ void __init emergency_stack_init(void) ...@@ -764,7 +764,7 @@ void __init emergency_stack_init(void)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{ {
return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE, MEMBLOCK_ALLOC_ACCESSIBLE,
early_cpu_to_node(cpu)); early_cpu_to_node(cpu));
} }
......
...@@ -1595,7 +1595,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, ...@@ -1595,7 +1595,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
cpu, size, __pa(ptr)); cpu, size, __pa(ptr));
} else { } else {
ptr = memblock_alloc_try_nid(size, align, goal, ptr = memblock_alloc_try_nid(size, align, goal,
BOOTMEM_ALLOC_ACCESSIBLE, node); MEMBLOCK_ALLOC_ACCESSIBLE, node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at " pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
"%016lx\n", cpu, size, node, __pa(ptr)); "%016lx\n", cpu, size, node, __pa(ptr));
} }
......
...@@ -114,7 +114,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, ...@@ -114,7 +114,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
cpu, size, __pa(ptr)); cpu, size, __pa(ptr));
} else { } else {
ptr = memblock_alloc_try_nid_nopanic(size, align, goal, ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
BOOTMEM_ALLOC_ACCESSIBLE, MEMBLOCK_ALLOC_ACCESSIBLE,
node); node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
......
...@@ -29,10 +29,10 @@ static __init void *early_alloc(size_t size, int nid, bool panic) ...@@ -29,10 +29,10 @@ static __init void *early_alloc(size_t size, int nid, bool panic)
{ {
if (panic) if (panic)
return memblock_alloc_try_nid(size, size, return memblock_alloc_try_nid(size, size,
__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
else else
return memblock_alloc_try_nid_nopanic(size, size, return memblock_alloc_try_nid_nopanic(size, size,
__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
} }
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/cpuset.h> #include <linux/cpuset.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/sysfs.h> #include <linux/sysfs.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/mmdebug.h> #include <linux/mmdebug.h>
...@@ -2102,7 +2103,7 @@ int __alloc_bootmem_huge_page(struct hstate *h) ...@@ -2102,7 +2103,7 @@ int __alloc_bootmem_huge_page(struct hstate *h)
addr = memblock_alloc_try_nid_raw( addr = memblock_alloc_try_nid_raw(
huge_page_size(h), huge_page_size(h), huge_page_size(h), huge_page_size(h),
0, BOOTMEM_ALLOC_ACCESSIBLE, node); 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
if (addr) { if (addr) {
/* /*
* Use the beginning of the huge page to store the * Use the beginning of the huge page to store the
......
...@@ -84,7 +84,7 @@ static inline bool kasan_zero_page_entry(pte_t pte) ...@@ -84,7 +84,7 @@ static inline bool kasan_zero_page_entry(pte_t pte)
static __init void *early_alloc(size_t size, int node) static __init void *early_alloc(size_t size, int node)
{ {
return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE, node); MEMBLOCK_ALLOC_ACCESSIBLE, node);
} }
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
......
...@@ -1342,7 +1342,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali ...@@ -1342,7 +1342,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
* hold the requested memory. * hold the requested memory.
* *
* The allocation is performed from memory region limited by * The allocation is performed from memory region limited by
* memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
* *
* The memory block is aligned on %SMP_CACHE_BYTES if @align == 0. * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
* *
...@@ -1429,7 +1429,7 @@ static void * __init memblock_alloc_internal( ...@@ -1429,7 +1429,7 @@ static void * __init memblock_alloc_internal(
* @min_addr: the lower bound of the memory region from where the allocation * @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address) * is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation * @max_addr: the upper bound of the memory region from where the allocation
* is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value * allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
* *
...@@ -1466,7 +1466,7 @@ void * __init memblock_alloc_try_nid_raw( ...@@ -1466,7 +1466,7 @@ void * __init memblock_alloc_try_nid_raw(
* @min_addr: the lower bound of the memory region from where the allocation * @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address) * is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation * @max_addr: the upper bound of the memory region from where the allocation
* is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value * allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
* *
...@@ -1501,7 +1501,7 @@ void * __init memblock_alloc_try_nid_nopanic( ...@@ -1501,7 +1501,7 @@ void * __init memblock_alloc_try_nid_nopanic(
* @min_addr: the lower bound of the memory region from where the allocation * @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address) * is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation * @max_addr: the upper bound of the memory region from where the allocation
* is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value * allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
* *
......
...@@ -163,7 +163,7 @@ static int __init alloc_node_page_ext(int nid) ...@@ -163,7 +163,7 @@ static int __init alloc_node_page_ext(int nid)
base = memblock_alloc_try_nid_nopanic( base = memblock_alloc_try_nid_nopanic(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE, nid); MEMBLOCK_ALLOC_ACCESSIBLE, nid);
if (!base) if (!base)
return -ENOMEM; return -ENOMEM;
NODE_DATA(nid)->node_page_ext = base; NODE_DATA(nid)->node_page_ext = base;
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/memremap.h> #include <linux/memremap.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -43,7 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node, ...@@ -43,7 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long goal) unsigned long goal)
{ {
return memblock_alloc_try_nid_raw(size, align, goal, return memblock_alloc_try_nid_raw(size, align, goal,
BOOTMEM_ALLOC_ACCESSIBLE, node); MEMBLOCK_ALLOC_ACCESSIBLE, node);
} }
void * __meminit vmemmap_alloc_block(unsigned long size, int node) void * __meminit vmemmap_alloc_block(unsigned long size, int node)
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/export.h> #include <linux/export.h>
...@@ -393,7 +394,7 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid, ...@@ -393,7 +394,7 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
map = memblock_alloc_try_nid(size, map = memblock_alloc_try_nid(size,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS), PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE, nid); MEMBLOCK_ALLOC_ACCESSIBLE, nid);
return map; return map;
} }
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
...@@ -407,7 +408,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid) ...@@ -407,7 +408,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
sparsemap_buf = sparsemap_buf =
memblock_alloc_try_nid_raw(size, PAGE_SIZE, memblock_alloc_try_nid_raw(size, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS), __pa(MAX_DMA_ADDRESS),
BOOTMEM_ALLOC_ACCESSIBLE, nid); MEMBLOCK_ALLOC_ACCESSIBLE, nid);
sparsemap_buf_end = sparsemap_buf + size; sparsemap_buf_end = sparsemap_buf + size;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment