Commit e6498040 authored by Tejun Heo, committed by H. Peter Anvin

memblock: Separate out memblock_find_in_range_node()

Node-affine memblock allocation logic is currently split across
memblock_alloc_nid() and memblock_alloc_nid_region().  This
reorganizes it so that it resembles the non-NUMA allocation API.

Area finding is collected and moved into a new exported function,
memblock_find_in_range_node(), which is symmetrical to its non-NUMA
counterpart: it takes @start/@end and understands both ANYWHERE and
ACCESSIBLE.  memblock_alloc_nid() now simply calls
memblock_find_in_range_node() and reserves the returned area, as
sketched below.

This makes memblock_alloc[_try]_nid() observe the ACCESSIBLE limit on
node-affine allocations too (again, this makes no difference for the
current sole user, sparc64).
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-8-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 34e18455
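
As described above, memblock_alloc_nid() now follows the same find-then-reserve split as the non-NUMA path: memblock_find_in_range_node() only locates a candidate area, and the caller reserves it. A rough sketch of the calling convention, assuming kernel context (memblock_find_in_range() is the existing non-NUMA helper, and MAX_NUMNODES means "match any node", as checked in the hunk below):

	phys_addr_t addr;

	/* non-NUMA: search [start, end) for a free area of @size */
	addr = memblock_find_in_range(start, end, size, align);

	/* NUMA-aware counterpart: same search, but prefer memory on @nid;
	 * passing MAX_NUMNODES accepts memory on any node */
	addr = memblock_find_in_range_node(start, end, size, align, nid);
	if (addr && !memblock_add_region(&memblock.reserved, addr, size))
		return addr;	/* found and reserved */
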
include/linux/memblock.h
@@ -61,6 +61,10 @@ extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
 /* The numa aware allocator is only available if
  * CONFIG_ARCH_POPULATES_NODE_MAP is set
  */
+extern phys_addr_t memblock_find_in_range_node(phys_addr_t start,
+					       phys_addr_t end,
+					       phys_addr_t size,
+					       phys_addr_t align, int nid);
 extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
 				       int nid);
 extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
mm/memblock.c
@@ -521,49 +521,56 @@ static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
 	return start;
 }
 
-static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
+					       phys_addr_t end,
 					       phys_addr_t size,
 					       phys_addr_t align, int nid)
 {
-	phys_addr_t start, end;
+	struct memblock_type *mem = &memblock.memory;
+	int i;
+
+	BUG_ON(0 == size);
+
+	/* Pump up max_addr */
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+		end = memblock.current_limit;
 
-	start = mp->base;
-	end = start + mp->size;
+	for (i = mem->cnt - 1; i >= 0; i--) {
+		struct memblock_region *r = &mem->regions[i];
+		phys_addr_t base = max(start, r->base);
+		phys_addr_t top = min(end, r->base + r->size);
 
-	while (start < end) {
-		phys_addr_t this_start;
-		int this_nid;
+		while (base < top) {
+			phys_addr_t tbase, ret;
+			int tnid;
 
-		this_start = memblock_nid_range_rev(start, end, &this_nid);
-		if (this_nid == nid) {
-			phys_addr_t ret = memblock_find_region(this_start, end, size, align);
-			if (ret &&
-			    !memblock_add_region(&memblock.reserved, ret, size))
-				return ret;
+			tbase = memblock_nid_range_rev(base, top, &tnid);
+			if (nid == MAX_NUMNODES || tnid == nid) {
+				ret = memblock_find_region(tbase, top, size, align);
+				if (ret)
+					return ret;
+			}
+			top = tbase;
 		}
-		end = this_start;
 	}
 
 	return 0;
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	struct memblock_type *mem = &memblock.memory;
-	int i;
+	phys_addr_t found;
 
-	BUG_ON(0 == size);
-
-	/* We align the size to limit fragmentation. Without this, a lot of
+	/*
+	 * We align the size to limit fragmentation. Without this, a lot of
 	 * small allocs quickly eat up the whole reserve array on sparc
 	 */
 	size = round_up(size, align);
 
-	for (i = mem->cnt - 1; i >= 0; i--) {
-		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
-							    size, align, nid);
-		if (ret)
-			return ret;
-	}
+	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
+					    size, align, nid);
+	if (found && !memblock_add_region(&memblock.reserved, found, size))
+		return found;
 
 	return 0;
 }
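
To make the shape of the new search concrete, here is a minimal, self-contained userspace model of it. This is illustrative only: the memory[]/nodemap[] tables, find_in_range_node(), nid_range_rev(), find_region() and ANY_NODE are toy stand-ins invented for this sketch (the kernel uses memblock.memory, memblock_nid_range_rev(), memblock_find_region() and MAX_NUMNODES), and alignments are assumed to be powers of two.

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long phys_t;

struct region    { phys_t base, size; };          /* a block of usable memory */
struct node_span { phys_t base, size; int nid; }; /* which node owns a range  */

/* Toy "physical memory": a low and a high region. */
static struct region memory[] = {
	{ 0x00100000ULL,  0x3ff00000ULL },	/* 1MB .. 1GB */
	{ 0x100000000ULL, 0x40000000ULL },	/* 4GB .. 5GB */
};

/* Toy node map: node 0 owns low memory, node 1 owns high memory. */
static struct node_span nodemap[] = {
	{ 0x0ULL,         0x40000000ULL, 0 },
	{ 0x100000000ULL, 0x40000000ULL, 1 },
};

#define NR_REGIONS	(sizeof(memory) / sizeof(memory[0]))
#define NR_SPANS	(sizeof(nodemap) / sizeof(nodemap[0]))
#define ANY_NODE	(-1)		/* plays the role of MAX_NUMNODES */

static phys_t align_down(phys_t x, phys_t a)	/* assumes power-of-two @a */
{
	return x & ~(a - 1);
}

/* Stand-in for memblock_nid_range_rev(): report the node owning the byte just
 * below @top and return where that node's coverage begins in [@base, @top). */
static phys_t nid_range_rev(phys_t base, phys_t top, int *nid)
{
	for (size_t i = 0; i < NR_SPANS; i++) {
		struct node_span *s = &nodemap[i];

		if (top - 1 >= s->base && top - 1 < s->base + s->size) {
			*nid = s->nid;
			return s->base > base ? s->base : base;
		}
	}
	*nid = ANY_NODE;
	return base;
}

/* Stand-in for memblock_find_region(): highest aligned fit in [@lo, @hi). */
static phys_t find_region(phys_t lo, phys_t hi, phys_t size, phys_t align)
{
	phys_t cand;

	if (hi < size)
		return 0;
	cand = align_down(hi - size, align);
	return cand >= lo ? cand : 0;
}

/* Model of memblock_find_in_range_node(): scan regions from the top, clamp
 * each to [@start, @end), then walk node sub-ranges downwards, trying a fit
 * whenever the sub-range belongs to the wanted node. */
static phys_t find_in_range_node(phys_t start, phys_t end,
				 phys_t size, phys_t align, int nid)
{
	for (int i = (int)NR_REGIONS - 1; i >= 0; i--) {
		struct region *r = &memory[i];
		phys_t base = start > r->base ? start : r->base;
		phys_t top  = end < r->base + r->size ? end : r->base + r->size;

		while (base < top) {
			int tnid;
			phys_t tbase = nid_range_rev(base, top, &tnid);

			if (nid == ANY_NODE || tnid == nid) {
				phys_t ret = find_region(tbase, top, size, align);
				if (ret)
					return ret;
			}
			top = tbase;	/* continue below this node's sub-range */
		}
	}
	return 0;
}

int main(void)
{
	/* ask for a 16MB, 1MB-aligned area on each node, anywhere below 5GB */
	printf("node 0: %#llx\n",
	       find_in_range_node(0, 0x140000000ULL, 0x1000000ULL, 0x100000ULL, 0));
	printf("node 1: %#llx\n",
	       find_in_range_node(0, 0x140000000ULL, 0x1000000ULL, 0x100000ULL, 1));
	return 0;
}

As in the patch, regions are scanned from the highest address down, each is clamped to [start, end), and node sub-ranges are walked top-down until a fit is found on the requested node.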