Commit 24aa0788 authored by Tejun Heo, committed by H. Peter Anvin

memblock, x86: Replace memblock_x86_reserve/free_range() with generic ones

Other than the sanity checks and debug messages, the x86 specific versions of
the memblock reserve/free functions are simple wrappers around the generic
memblock_reserve()/memblock_free().

This patch adds debug messages with caller identification to the generic
versions, converts the callers of the x86 specific ones, and kills them.
arch/x86/include/asm/memblock.h and arch/x86/mm/memblock.c are empty
after this change and are removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-14-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent c378ddd5
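
Every call-site change in the diff below follows the same pattern: the x86 wrappers took an end-exclusive [start, end) range plus a label string, while the generic interface takes a base address and a size, so each conversion is simply size = end - start (the label is dropped in favour of the caller identification added near the end of the diff). A minimal stand-alone C sketch of that conversion; reserve() and reserve_range() here are hypothetical stand-ins for illustration, not kernel APIs:

/* Illustrative sketch of the call-site conversion; not part of the patch. */
#include <stdio.h>

typedef unsigned long long u64;

/* Stand-in for the generic base/size interface, memblock_reserve(base, size) style. */
static void reserve(u64 base, u64 size)
{
	printf("reserve [%#llx-%#llx]\n", base, base + size - 1);
}

/* Old x86-style wrapper signature: end-exclusive [start, end) range. */
static void reserve_range(u64 start, u64 end)
{
	if (start == end)
		return;
	reserve(start, end - start);	/* the conversion used throughout the diff */
}

int main(void)
{
	reserve_range(0x9f000, 0x100000);	/* e.g. the reserve_ebda_region() case below */
	return 0;
}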
deleted file: arch/x86/include/asm/memblock.h
-#ifndef _X86_MEMBLOCK_H
-#define _X86_MEMBLOCK_H
-void memblock_x86_reserve_range(u64 start, u64 end, char *name);
-void memblock_x86_free_range(u64 start, u64 end);
-#endif
@@ -94,7 +94,7 @@ static u32 __init allocate_aperture(void)
 			addr, aper_size>>10);
 		return 0;
 	}
-	memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
+	memblock_reserve(addr, aper_size);
 	/*
 	 * Kmemleak should not scan this block as it may not be mapped via the
 	 * kernel direct mapping.
@@ -91,7 +91,7 @@ void __init setup_bios_corruption_check(void)
 		if (start >= end)
 			continue;
-		memblock_x86_reserve_range(start, end, "SCAN RAM");
+		memblock_reserve(start, end - start);
 		scan_areas[num_scan_areas].addr = start;
 		scan_areas[num_scan_areas].size = end - start;
@@ -52,5 +52,5 @@ void __init reserve_ebda_region(void)
 		lowmem = 0x9f000;
 	/* reserve all memory between lowmem and the 1MB mark */
-	memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+	memblock_reserve(lowmem, 0x100000 - lowmem);
 }
@@ -33,7 +33,8 @@ void __init i386_start_kernel(void)
 {
 	memblock_init();
-	memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+	memblock_reserve(__pa_symbol(&_text),
+			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
@@ -42,7 +43,7 @@ void __init i386_start_kernel(void)
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size = boot_params.hdr.ramdisk_size;
 		u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
 	}
 #endif
@@ -100,7 +100,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
 	memblock_init();
-	memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+	memblock_reserve(__pa_symbol(&_text),
+			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
@@ -109,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
 		unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
 	}
 #endif
@@ -564,9 +564,7 @@ void __init default_get_smp_config(unsigned int early)
 static void __init smp_reserve_memory(struct mpf_intel *mpf)
 {
-	unsigned long size = get_mpc_size(mpf->physptr);
-	memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
+	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
 }
 static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -595,7 +593,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
 			       mpf, (u64)virt_to_phys(mpf));
 			mem = virt_to_phys(mpf);
-			memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf");
+			memblock_reserve(mem, sizeof(*mpf));
 			if (mpf->physptr)
 				smp_reserve_memory(mpf);
@@ -306,7 +306,8 @@ static void __init cleanup_highmap(void)
 static void __init reserve_brk(void)
 {
 	if (_brk_end > _brk_start)
-		memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
+		memblock_reserve(__pa(_brk_start),
+				 __pa(_brk_end) - __pa(_brk_start));
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
@@ -337,7 +338,7 @@ static void __init relocate_initrd(void)
 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
+	memblock_reserve(ramdisk_here, area_size);
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end = initrd_start + ramdisk_size;
 	printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -393,7 +394,7 @@ static void __init reserve_initrd(void)
 	initrd_start = 0;
 	if (ramdisk_size >= (end_of_lowmem>>1)) {
-		memblock_x86_free_range(ramdisk_image, ramdisk_end);
+		memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 		printk(KERN_ERR "initrd too large to handle, "
 		       "disabling initrd\n");
 		return;
@@ -416,7 +417,7 @@ static void __init reserve_initrd(void)
 	relocate_initrd();
-	memblock_x86_free_range(ramdisk_image, ramdisk_end);
+	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
 static void __init reserve_initrd(void)
@@ -490,15 +491,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 {
 	struct setup_data *data;
 	u64 pa_data;
-	char buf[32];
 	if (boot_params.hdr.version < 0x0209)
 		return;
 	pa_data = boot_params.hdr.setup_data;
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
-		sprintf(buf, "setup data %x", data->type);
-		memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
+		memblock_reserve(pa_data, sizeof(*data) + data->len);
 		pa_data = data->next;
 		early_iounmap(data, sizeof(*data));
 	}
@@ -568,7 +567,7 @@ static void __init reserve_crashkernel(void)
 			return;
 		}
 	}
-	memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
+	memblock_reserve(crash_base, crash_size);
 	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
 			"for crashkernel (System RAM: %ldMB)\n",
@@ -626,7 +625,7 @@ static __init void reserve_ibft_region(void)
 	addr = find_ibft_region(&size);
 	if (size)
-		memblock_x86_reserve_range(addr, addr + size, "* ibft");
+		memblock_reserve(addr, size);
 }
 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
@@ -18,7 +18,7 @@ void __init setup_trampolines(void)
 		panic("Cannot allocate trampoline\n");
 	x86_trampoline_base = __va(mem);
-	memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+	memblock_reserve(mem, size);
 	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
 	       x86_trampoline_base, (unsigned long long)mem, size);
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
 obj-$(CONFIG_ACPI_NUMA) += srat.o
 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
-obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 obj-$(CONFIG_MEMTEST) += memtest.o
@@ -81,7 +81,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
-	memblock_x86_reserve_range(start, end, "PGTABLE");
+	memblock_reserve(start, end - start);
 }
 struct map_range {
@@ -280,8 +280,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
 	 * so that they can be reused for other purposes.
 	 *
-	 * On native it just means calling memblock_x86_reserve_range, on Xen it
-	 * also means marking RW the pagetable pages that we allocated before
+	 * On native it just means calling memblock_reserve, on Xen it also
+	 * means marking RW the pagetable pages that we allocated before
 	 * but that haven't been used.
 	 *
 	 * In fact on xen we mark RO the whole range pgt_buf_start -
deleted file: arch/x86/mm/memblock.c
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
-	if (start == end)
-		return;
-	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-	memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-	memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
-	if (start == end)
-		return;
-	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-	memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-	memblock_free(start, end - start);
-}
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 	       (unsigned long long) pattern,
 	       (unsigned long long) start_bad,
 	       (unsigned long long) end_bad);
-	memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+	memblock_reserve(start_bad, end_bad - start_bad);
 }
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -364,8 +364,7 @@ void __init numa_reset_distance(void)
 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_x86_free_range(__pa(numa_distance),
-					__pa(numa_distance) + size);
+		memblock_free(__pa(numa_distance), size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }
@@ -394,7 +393,7 @@ static int __init numa_alloc_distance(void)
 		numa_distance = (void *)1LU;
 		return -ENOMEM;
 	}
-	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+	memblock_reserve(phys, size);
 	numa_distance = __va(phys);
 	numa_distance_cnt = cnt;
@@ -204,7 +204,7 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
 			   size, nid);
 		return;
 	}
-	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+	memblock_reserve(node_pa, size);
 	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
 					  max_low_pfn << PAGE_SHIFT,
@@ -212,10 +212,10 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
 	if (!remap_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
 			   size, nid);
-		memblock_x86_free_range(node_pa, node_pa + size);
+		memblock_free(node_pa, size);
 		return;
 	}
-	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+	memblock_reserve(remap_pa, size);
 	remap_va = phys_to_virt(remap_pa);
 	/* perform actual remap */
@@ -361,7 +361,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
 			goto no_emu;
 		}
-		memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+		memblock_reserve(phys, phys_size);
 		phys_dist = __va(phys);
 		for (i = 0; i < numa_dist_cnt; i++)
@@ -430,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	/* free the copied physical distance table */
 	if (phys_dist)
-		memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+		memblock_free(__pa(phys_dist), phys_size);
 	return;
 no_emu:
@@ -280,8 +280,7 @@ void __init efi_memblock_x86_reserve_range(void)
 		boot_params.efi_info.efi_memdesc_size;
 	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
 	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
-	memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
-				   "EFI memmap");
+	memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
 }
 #if EFI_DEBUG
@@ -332,8 +331,7 @@ void __init efi_reserve_boot_services(void)
 				  "[0x%010llx-0x%010llx]\n",
 				  start, start+size-1);
 		} else
-			memblock_x86_reserve_range(start, start+size,
-						   "EFI Boot");
+			memblock_reserve(start, size);
 	}
 }
@@ -1720,10 +1720,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 	__xen_write_cr3(true, __pa(pgd));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
-	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
+	memblock_reserve(__pa(xen_start_info->pt_base),
+			 xen_start_info->nr_pt_frames * PAGE_SIZE);
 	return pgd;
 }
@@ -1799,10 +1797,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 			  PFN_DOWN(__pa(initial_page_table)));
 	xen_write_cr3(__pa(initial_page_table));
-	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
+	memblock_reserve(__pa(xen_start_info->pt_base),
+			 xen_start_info->nr_pt_frames * PAGE_SIZE);
 	return initial_page_table;
 }
@@ -63,7 +63,7 @@ static void __init xen_add_extra_mem(unsigned long pages)
 	e820_add_region(extra_start, size, E820_RAM);
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+	memblock_reserve(extra_start, size);
 	xen_extra_mem_size += size;
@@ -287,9 +287,8 @@ char * __init xen_memory_setup(void)
 	 *  - xen_start_info
 	 * See comment above "struct start_info" in <xen/interface/xen.h>
 	 */
-	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
-				   __pa(xen_start_info->pt_base),
-				   "XEN START INFO");
+	memblock_reserve(__pa(xen_start_info->mfn_list),
+			 xen_start_info->pt_base - xen_start_info->mfn_list);
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
@@ -17,8 +17,6 @@
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/memblock.h>
 #define INIT_MEMBLOCK_REGIONS 128
 struct memblock_region {
@@ -449,6 +449,9 @@ long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
 {
+	memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
+		     base, base + size, (void *)_RET_IP_);
 	return __memblock_remove(&memblock.reserved, base, size);
 }
@@ -456,6 +459,8 @@ long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	struct memblock_type *_rgn = &memblock.reserved;
+	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+		     base, base + size, (void *)_RET_IP_);
 	BUG_ON(0 == size);
 	return memblock_add_region(_rgn, base, size);
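
The label strings that the x86 wrappers printed ("RAMDISK", "BRK", ...) are replaced above by caller identification: memblock_dbg() now logs the caller's return address via _RET_IP_ and the %pF printk format, which resolves it to a symbol name when the kernel is booted with memblock=debug. A rough user-space sketch of the same caller-identification idea, assuming GCC's __builtin_return_address(); my_reserve and reserve_dbg are made-up names and no symbol resolution is attempted here:

/* User-space sketch of caller identification in debug output; not kernel code. */
#include <stdio.h>

typedef unsigned long long u64;

static int debug_enabled = 1;	/* stand-in for booting with "memblock=debug" */

#define reserve_dbg(fmt, ...) \
	do { if (debug_enabled) printf(fmt, ##__VA_ARGS__); } while (0)

static void my_reserve(u64 base, u64 size)
{
	/* __builtin_return_address(0) plays the role of the kernel's _RET_IP_. */
	reserve_dbg("memblock_reserve: [%#016llx-%#016llx] caller=%p\n",
		    base, base + size, __builtin_return_address(0));
	/* ...actual region bookkeeping would go here... */
}

int main(void)
{
	my_reserve(0x100000, 0x1000);
	return 0;
}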
@@ -47,7 +47,7 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 	ptr = phys_to_virt(addr);
 	memset(ptr, 0, size);
-	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+	memblock_reserve(addr, size);
 	/*
 	 * The min_count is set to 0 so that bootmem allocated blocks
 	 * are never reported as leaks.
@@ -175,7 +175,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 			      unsigned long size)
 {
 	kmemleak_free_part(__va(physaddr), size);
-	memblock_x86_free_range(physaddr, physaddr + size);
+	memblock_free(physaddr, size);
 }
 /**
@@ -190,7 +190,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 	kmemleak_free_part(__va(addr), size);
-	memblock_x86_free_range(addr, addr + size);
+	memblock_free(addr, size);
 }
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,