Commit 0608f70c authored by Tejun Heo's avatar Tejun Heo Committed by H. Peter Anvin

x86: Use HAVE_MEMBLOCK_NODE_MAP

From 5732e1247898d67cbf837585150fe9f68974671d Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Thu, 14 Jul 2011 11:22:16 +0200

Convert x86 to HAVE_MEMBLOCK_NODE_MAP.  The only difference in memory
handling is that allocations can no longer cross node boundaries,
whether they're node affine or not, which shouldn't matter at all.

This conversion will enable further simplification of boot memory
handling.

-v2: Fix build failure on !NUMA configurations discovered by hpa.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110714094423.GG3455@htj.dyndns.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 7c0caeb8
...@@ -25,6 +25,7 @@ config X86 ...@@ -25,6 +25,7 @@ config X86
select HAVE_IOREMAP_PROT select HAVE_IOREMAP_PROT
select HAVE_KPROBES select HAVE_KPROBES
select HAVE_MEMBLOCK select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS select HAVE_DMA_ATTRS
......
...@@ -12,8 +12,6 @@ int __get_free_all_memory_range(struct range **range, int nodeid, ...@@ -12,8 +12,6 @@ int __get_free_all_memory_range(struct range **range, int nodeid,
unsigned long start_pfn, unsigned long end_pfn); unsigned long start_pfn, unsigned long end_pfn);
int get_free_all_memory_range(struct range **rangep, int nodeid); int get_free_all_memory_range(struct range **rangep, int nodeid);
void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn);
u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_hole_size(u64 start, u64 end);
u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
u64 memblock_x86_memory_in_range(u64 addr, u64 limit); u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
......
...@@ -650,18 +650,18 @@ void __init initmem_init(void) ...@@ -650,18 +650,18 @@ void __init initmem_init(void)
highstart_pfn = highend_pfn = max_pfn; highstart_pfn = highend_pfn = max_pfn;
if (max_pfn > max_low_pfn) if (max_pfn > max_low_pfn)
highstart_pfn = max_low_pfn; highstart_pfn = max_low_pfn;
memblock_x86_register_active_regions(0, 0, highend_pfn);
sparse_memory_present_with_active_regions(0);
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn)); pages_to_mb(highend_pfn - highstart_pfn));
num_physpages = highend_pfn; num_physpages = highend_pfn;
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else #else
memblock_x86_register_active_regions(0, 0, max_low_pfn);
sparse_memory_present_with_active_regions(0);
num_physpages = max_low_pfn; num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif #endif
memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_FLATMEM #ifdef CONFIG_FLATMEM
max_mapnr = num_physpages; max_mapnr = num_physpages;
#endif #endif
......
...@@ -607,7 +607,7 @@ kernel_physical_mapping_init(unsigned long start, ...@@ -607,7 +607,7 @@ kernel_physical_mapping_init(unsigned long start,
#ifndef CONFIG_NUMA #ifndef CONFIG_NUMA
void __init initmem_init(void) void __init initmem_init(void)
{ {
memblock_x86_register_active_regions(0, 0, max_pfn); memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
} }
#endif #endif
......
...@@ -283,20 +283,6 @@ static int __init memblock_x86_find_active_region(const struct memblock_region * ...@@ -283,20 +283,6 @@ static int __init memblock_x86_find_active_region(const struct memblock_region *
return 1; return 1;
} }
/* Walk the memblock.memory map and register active regions within a node */
/*
 * memblock_x86_register_active_regions - register a node's active PFN ranges
 * @nid:       node id to attribute the discovered regions to
 * @start_pfn: first page frame of the window to scan
 * @last_pfn:  end page frame of the window to scan
 *
 * For each memblock.memory region that intersects [start_pfn, last_pfn),
 * the intersecting sub-range is passed to add_active_range() under @nid.
 * NOTE(review): this helper is removed by this commit; its callers are
 * converted to memblock_set_node() under HAVE_MEMBLOCK_NODE_MAP.
 */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn)
{
unsigned long ei_startpfn;
unsigned long ei_endpfn;
struct memblock_region *r;
/* memblock_x86_find_active_region() clips @r to the window and fills
 * ei_startpfn/ei_endpfn; it returns nonzero only when there is overlap. */
for_each_memblock(memory, r)
if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
&ei_startpfn, &ei_endpfn))
add_active_range(nid, ei_startpfn, ei_endpfn);
}
/* /*
* Find the hole size (in bytes) in the memory range. * Find the hole size (in bytes) in the memory range.
* @start: starting address of the memory range to scan * @start: starting address of the memory range to scan
......
...@@ -498,13 +498,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) ...@@ -498,13 +498,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (WARN_ON(nodes_empty(node_possible_map))) if (WARN_ON(nodes_empty(node_possible_map)))
return -EINVAL; return -EINVAL;
for (i = 0; i < mi->nr_blks; i++) for (i = 0; i < mi->nr_blks; i++) {
memblock_x86_register_active_regions(mi->blk[i].nid, struct numa_memblk *mb = &mi->blk[i];
mi->blk[i].start >> PAGE_SHIFT, memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
mi->blk[i].end >> PAGE_SHIFT); }
/* for out of order entries */
sort_node_map();
/* /*
* If sections array is gonna be used for pfn -> nid mapping, check * If sections array is gonna be used for pfn -> nid mapping, check
...@@ -538,6 +535,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) ...@@ -538,6 +535,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
setup_node_data(nid, start, end); setup_node_data(nid, start, end);
} }
/* Dump memblock with node info and return. */
memblock_dump_all();
return 0; return 0;
} }
...@@ -575,7 +574,7 @@ static int __init numa_init(int (*init_func)(void)) ...@@ -575,7 +574,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map); nodes_clear(node_possible_map);
nodes_clear(node_online_map); nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo)); memset(&numa_meminfo, 0, sizeof(numa_meminfo));
remove_all_active_ranges(); WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
numa_reset_distance(); numa_reset_distance();
ret = init_func(); ret = init_func();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment