Commit 96e907d1 authored by Tejun Heo, committed by H. Peter Anvin

bootmem: Reimplement __absent_pages_in_range() using for_each_mem_pfn_range()

__absent_pages_in_range() was needlessly complex.  Reimplement it
using for_each_mem_pfn_range().

Also, update zone_absent_pages_in_node() such that it doesn't call
__absent_pages_in_range() with @zone_start_pfn which is larger than
@zone_end_pfn.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-3-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 5dfe8660
...@@ -4044,46 +4044,16 @@ unsigned long __meminit __absent_pages_in_range(int nid, ...@@ -4044,46 +4044,16 @@ unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn, unsigned long range_start_pfn,
unsigned long range_end_pfn) unsigned long range_end_pfn)
{ {
int i = 0; unsigned long nr_absent = range_end_pfn - range_start_pfn;
unsigned long prev_end_pfn = 0, hole_pages = 0; unsigned long start_pfn, end_pfn;
unsigned long start_pfn; int i;
/* Find the end_pfn of the first active range of pfns in the node */
i = first_active_region_index_in_nid(nid);
if (i == -1)
return 0;
prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
/* Account for ranges before physical memory on this node */
if (early_node_map[i].start_pfn > range_start_pfn)
hole_pages = prev_end_pfn - range_start_pfn;
/* Find all holes for the zone within the node */
for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
/* No need to continue if prev_end_pfn is outside the zone */
if (prev_end_pfn >= range_end_pfn)
break;
/* Make sure the end of the zone is not within the hole */
start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
prev_end_pfn = max(prev_end_pfn, range_start_pfn);
/* Update the hole size cound and move on */ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
if (start_pfn > range_start_pfn) { start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
BUG_ON(prev_end_pfn > start_pfn); end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
hole_pages += start_pfn - prev_end_pfn; nr_absent -= end_pfn - start_pfn;
} }
prev_end_pfn = early_node_map[i].end_pfn; return nr_absent;
}
/* Account for ranges past physical memory on this node */
if (range_end_pfn > prev_end_pfn)
hole_pages += range_end_pfn -
max(range_start_pfn, prev_end_pfn);
return hole_pages;
} }
/** /**
...@@ -4104,14 +4074,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid, ...@@ -4104,14 +4074,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type, unsigned long zone_type,
unsigned long *ignored) unsigned long *ignored)
{ {
unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long node_start_pfn, node_end_pfn; unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn; unsigned long zone_start_pfn, zone_end_pfn;
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
node_start_pfn); zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
node_end_pfn);
adjust_zone_range_for_zone_movable(nid, zone_type, adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn, node_start_pfn, node_end_pfn,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment