Commit 26fb3dae authored by Mike Rapoport, committed by Linus Torvalds

memblock: drop memblock_alloc_*_nopanic() variants

As all the memblock allocation functions return NULL in case of error
rather than panic(), the duplicates with _nopanic suffix can be removed.
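
For reference, a minimal sketch of the pattern call sites follow once the
_nopanic variants are gone (hypothetical caller, not taken from this diff):
check the returned pointer and decide locally whether to panic, fall back,
or return an error.

	void __init example_early_setup(void)
	{
		void *buf;

		buf = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
		if (!buf)
			panic("%s: failed to allocate %lu bytes\n",
			      __func__, (unsigned long)PAGE_SIZE);
		/* ... use buf ... */
	}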

Link: http://lkml.kernel.org/r/1548057848-15136-22-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>		[printk]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com>				[c-sky]
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Juergen Gross <jgross@suse.com>			[Xen]
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c0dbe825
@@ -181,8 +181,7 @@ static void init_unwind_hdr(struct unwind_table *table,
  */
 static void *__init unw_hdr_alloc_early(unsigned long sz)
 {
-	return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
-					   MAX_DMA_ADDRESS);
+	return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
 }
 
 static void *unw_hdr_alloc(unsigned long sz)
...
@@ -202,7 +202,7 @@ void __init allocate_pgdat(unsigned int nid)
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	NODE_DATA(nid) = memblock_alloc_try_nid_nopanic(
+	NODE_DATA(nid) = memblock_alloc_try_nid(
 						sizeof(struct pglist_data),
 						SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
 						MEMBLOCK_ALLOC_ACCESSIBLE, nid);
...
@@ -106,13 +106,13 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 	void *ptr;
 
 	if (!node_online(node) || !NODE_DATA(node)) {
-		ptr = memblock_alloc_from_nopanic(size, align, goal);
+		ptr = memblock_alloc_from(size, align, goal);
 		pr_info("cpu %d has no node %d or node-local memory\n",
 			cpu, node);
 		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
 			 cpu, size, __pa(ptr));
 	} else {
-		ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
+		ptr = memblock_alloc_try_nid(size, align, goal,
 						     MEMBLOCK_ALLOC_ACCESSIBLE,
 						     node);
@@ -121,7 +121,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 	}
 	return ptr;
 #else
-	return memblock_alloc_from_nopanic(size, align, goal);
+	return memblock_alloc_from(size, align, goal);
 #endif
 }
...
@@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
 {
-	if (panic)
-		return memblock_alloc_try_nid(size, size,
-			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-	else
-		return memblock_alloc_try_nid_nopanic(size, size,
+	void *ptr = memblock_alloc_try_nid(size, size,
 			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+	if (!ptr && should_panic)
+		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+	return ptr;
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
...
@@ -333,7 +333,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
 {
 	struct firmware_map_entry *entry;
 
-	entry = memblock_alloc_nopanic(sizeof(struct firmware_map_entry),
+	entry = memblock_alloc(sizeof(struct firmware_map_entry),
 			       SMP_CACHE_BYTES);
 	if (WARN_ON(!entry))
 		return -ENOMEM;
...
@@ -94,7 +94,7 @@ static void * __init xdbc_get_page(dma_addr_t *dma_addr)
 {
 	void *virt;
 
-	virt = memblock_alloc_nopanic(PAGE_SIZE, PAGE_SIZE);
+	virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 	if (!virt)
 		return NULL;
...
@@ -335,9 +335,6 @@ static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
 void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
 				 phys_addr_t min_addr, phys_addr_t max_addr,
 				 int nid);
-void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
-				      phys_addr_t min_addr, phys_addr_t max_addr,
-				      int nid);
 void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
 			     phys_addr_t min_addr, phys_addr_t max_addr,
 			     int nid);
@@ -364,36 +361,12 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
-						    phys_addr_t align)
-{
-	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
-					      MEMBLOCK_ALLOC_ACCESSIBLE,
-					      NUMA_NO_NODE);
-}
-
 static inline void * __init memblock_alloc_low(phys_addr_t size,
 					       phys_addr_t align)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
 }
-
-static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
-							phys_addr_t align)
-{
-	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
-					      ARCH_LOW_ADDRESS_LIMIT,
-					      NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
-							 phys_addr_t align,
-							 phys_addr_t min_addr)
-{
-	return memblock_alloc_try_nid_nopanic(size, align, min_addr,
-					      MEMBLOCK_ALLOC_ACCESSIBLE,
-					      NUMA_NO_NODE);
-}
 
 static inline void * __init memblock_alloc_node(phys_addr_t size,
 						phys_addr_t align, int nid)
@@ -402,14 +375,6 @@ static inline void * __init memblock_alloc_node(phys_addr_t size,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
-static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
-							 int nid)
-{
-	return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
-					      MEMBLOCK_LOW_LIMIT,
-					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
 static inline void __init memblock_free_early(phys_addr_t base,
 					      phys_addr_t size)
 {
...
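As an illustration of the header change above (hypothetical call site, not
part of this diff), a former user of the removed two-argument wrapper now
passes the alignment explicitly and checks for NULL itself:

	/* before */
	buf = memblock_alloc_node_nopanic(size, nid);

	/* after */
	buf = memblock_alloc_node(size, SMP_CACHE_BYTES, nid);
	if (!buf)
		panic("Failed to allocate %lu bytes on node %d\n",
		      (unsigned long)size, nid);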
@@ -256,7 +256,7 @@ swiotlb_init(int verbose)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/* Get IO TLB memory from the low pages */
-	vstart = memblock_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+	vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
...
@@ -1143,14 +1143,7 @@ void __init setup_log_buf(int early)
 	if (!new_log_buf_len)
 		return;
 
-	if (early) {
-		new_log_buf =
-			memblock_alloc(new_log_buf_len, LOG_ALIGN);
-	} else {
-		new_log_buf = memblock_alloc_nopanic(new_log_buf_len,
-						     LOG_ALIGN);
-	}
+	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
 
 	if (unlikely(!new_log_buf)) {
 		pr_err("log_buf_len: %lu bytes not available\n",
 			new_log_buf_len);
...
@@ -1433,41 +1433,6 @@ void * __init memblock_alloc_try_nid_raw(
 	return ptr;
 }
 
-/**
- * memblock_alloc_try_nid_nopanic - allocate boot memory block
- * @size: size of memory block to be allocated in bytes
- * @align: alignment of the region and block's size
- * @min_addr: the lower bound of the memory region from where the allocation
- *	  is preferred (phys address)
- * @max_addr: the upper bound of the memory region from where the allocation
- *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
- *	      allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
- *
- * Public function, provides additional debug information (including caller
- * info), if enabled. This function zeroes the allocated memory.
- *
- * Return:
- * Virtual address of allocated memory block on success, NULL on failure.
- */
-void * __init memblock_alloc_try_nid_nopanic(
-			phys_addr_t size, phys_addr_t align,
-			phys_addr_t min_addr, phys_addr_t max_addr,
-			int nid)
-{
-	void *ptr;
-
-	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
-		     __func__, (u64)size, (u64)align, nid, &min_addr,
-		     &max_addr, (void *)_RET_IP_);
-
-	ptr = memblock_alloc_internal(size, align,
-					   min_addr, max_addr, nid);
-	if (ptr)
-		memset(ptr, 0, size);
-	return ptr;
-}
-
 /**
  * memblock_alloc_try_nid - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
...
@@ -6445,7 +6445,7 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
 	zone->pageblock_flags = NULL;
 	if (usemapsize) {
 		zone->pageblock_flags =
-			memblock_alloc_node_nopanic(usemapsize,
+			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
 						    pgdat->node_id);
 		if (!zone->pageblock_flags)
 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
@@ -6679,7 +6679,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
-		map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
+					  pgdat->node_id);
 		if (!map)
 			panic("Failed to allocate %ld bytes for node %d memory map\n",
 			      size, pgdat->node_id);
@@ -7959,8 +7960,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		size = bucketsize << log2qty;
 		if (flags & HASH_EARLY) {
 			if (flags & HASH_ZERO)
-				table = memblock_alloc_nopanic(size,
-							       SMP_CACHE_BYTES);
+				table = memblock_alloc(size, SMP_CACHE_BYTES);
 			else
 				table = memblock_alloc_raw(size,
 							   SMP_CACHE_BYTES);
...
@@ -161,7 +161,7 @@ static int __init alloc_node_page_ext(int nid)
 
 	table_size = get_entry_size() * nr_pages;
 
-	base = memblock_alloc_try_nid_nopanic(
+	base = memblock_alloc_try_nid(
 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
 			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	if (!base)
...
@@ -1905,7 +1905,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
 			  __alignof__(ai->groups[0].cpu_map[0]));
 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-	ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
+	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
 	if (!ptr)
 		return NULL;
 	ai = ptr;
@@ -2496,7 +2496,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-	areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
+	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
 	if (!areas) {
 		rc = -ENOMEM;
 		goto out_free;
@@ -2729,8 +2729,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
 				       size_t align)
 {
-	return memblock_alloc_from_nopanic(
-			size, align, __pa(MAX_DMA_ADDRESS));
+	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
@@ -2778,9 +2777,7 @@ void __init setup_per_cpu_areas(void)
 	void *fc;
 
 	ai = pcpu_alloc_alloc_info(1, 1);
-	fc = memblock_alloc_from_nopanic(unit_size,
-					 PAGE_SIZE,
-					 __pa(MAX_DMA_ADDRESS));
+	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
 	/* kmemleak tracks the percpu allocations separately */
...
@@ -330,9 +330,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	limit = goal + (1UL << PA_SECTION_SHIFT);
 	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-	p = memblock_alloc_try_nid_nopanic(size,
-					   SMP_CACHE_BYTES, goal, limit,
-					   nid);
+	p = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
 	if (!p && limit) {
 		limit = 0;
 		goto again;
@@ -386,7 +384,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 					 unsigned long size)
 {
-	return memblock_alloc_node_nopanic(size, pgdat->node_id);
+	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
...