Commit 88ba088c authored by Yinghai Lu, committed by H. Peter Anvin

x86, memblock: Add memblock_x86_register_active_regions() and memblock_x86_hole_size()

memblock_x86_register_active_regions() will be used to fill early_node_map;
the result is memblock.memory.region data intersected with the NUMA node ranges.

memblock_x86_hole_size() will be used to find the hole size in
memblock.memory.region within a specified range.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 4d5cf86c
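
For context, a hypothetical caller on a flat (non-NUMA) setup path could register all of memory against node 0 with the new helper. This is a minimal sketch, assuming an initmem_init()-style call site and the existing max_pfn symbol; it is not part of this patch:

/* Hypothetical caller sketch (not from this commit): populate
 * early_node_map from memblock.memory, attributing everything to node 0. */
void __init register_all_memory_node0(void)
{
	memblock_x86_register_active_regions(0, 0, max_pfn);
}
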
...@@ -11,4 +11,8 @@ void memblock_x86_free_range(u64 start, u64 end);
struct range;
int get_free_all_memory_range(struct range **rangep, int nodeid);
void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn);
u64 memblock_x86_hole_size(u64 start, u64 end);
#endif
...@@ -232,3 +232,69 @@ void __init memblock_x86_free_range(u64 start, u64 end)
memblock_free(start, end - start);
}
/*
* Finds an active region in the address range from start_pfn to last_pfn and
* returns its range in ei_startpfn and ei_endpfn for the memblock entry.
*/
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
unsigned long start_pfn,
unsigned long last_pfn,
unsigned long *ei_startpfn,
unsigned long *ei_endpfn)
{
u64 align = PAGE_SIZE;
*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
/* Skip map entries smaller than a page */
if (*ei_startpfn >= *ei_endpfn)
return 0;
/* Skip if map is outside the node */
if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
return 0;
/* Check for overlaps */
if (*ei_startpfn < start_pfn)
*ei_startpfn = start_pfn;
if (*ei_endpfn > last_pfn)
*ei_endpfn = last_pfn;
return 1;
}
/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn)
{
unsigned long ei_startpfn;
unsigned long ei_endpfn;
struct memblock_region *r;
for_each_memblock(memory, r)
if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
&ei_startpfn, &ei_endpfn))
add_active_range(nid, ei_startpfn, ei_endpfn);
}
/*
* Find the hole size (in bytes) in the memory range.
* @start: starting address of the memory range to scan
* @end: ending address of the memory range to scan
*/
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long last_pfn = end >> PAGE_SHIFT;
unsigned long ei_startpfn, ei_endpfn, ram = 0;
struct memblock_region *r;
for_each_memblock(memory, r)
if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
&ei_startpfn, &ei_endpfn))
ram += ei_endpfn - ei_startpfn;
return end - start - ((u64)ram << PAGE_SHIFT);
}
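
A plausible use of memblock_x86_hole_size() is rejecting candidate node ranges that are mostly hole, for example during SRAT/NUMA sanity checks. The helper below is only a sketch under that assumption; its name and the one-half threshold are illustrative, not taken from this commit:

/* Hypothetical check (not from this commit): treat a [start, end) range
 * as suspect if more than half of it is not covered by memblock.memory. */
static int __init range_mostly_hole(u64 start, u64 end)
{
	return memblock_x86_hole_size(start, end) > ((end - start) >> 1);
}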