Commit 784656f9 authored by Tejun Heo, committed by H. Peter Anvin

memblock: Reimplement memblock_add_region()

memblock_add_region() carefully checked for merge and overlap
conditions while adding a new region. This was complicated and made
it difficult to allow arbitrary overlaps or to add more merge
conditions (e.g. node ID).

This reimplements memblock_add_region() so that insertion is done in
two steps - all non-overlapping portions of the new area are inserted
as separate regions first, and then memblock_merge_regions() scans and
merges all neighbouring compatible regions.

This makes the addition logic simpler and more versatile, and enables
adding node information to memblock.
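
For illustration only, here is a minimal userspace sketch of the same
two-step scheme. The names (struct range, add_range(), merge_ranges())
and the fixed-size array are simplified stand-ins invented for this
sketch, not the kernel's memblock structures, and the array-growing
step (memblock_double_array()) is deliberately omitted:

    #include <stdio.h>
    #include <string.h>

    #define MAX_RANGES 16

    struct range { unsigned long base, size; };

    static struct range ranges[MAX_RANGES];
    static int cnt;

    /* Step 1 helper: insert a range at idx, shifting the tail up.
     * No overflow check here; the kernel instead counts the regions
     * needed in a first pass and grows the array before inserting. */
    static void insert_range(int idx, unsigned long base, unsigned long size)
    {
            memmove(&ranges[idx + 1], &ranges[idx],
                    (cnt - idx) * sizeof(ranges[0]));
            ranges[idx].base = base;
            ranges[idx].size = size;
            cnt++;
    }

    /* Step 2: merge neighbouring ranges that abut exactly. */
    static void merge_ranges(void)
    {
            int i = 0;

            while (i < cnt - 1) {
                    if (ranges[i].base + ranges[i].size != ranges[i + 1].base) {
                            i++;
                            continue;
                    }
                    ranges[i].size += ranges[i + 1].size;
                    memmove(&ranges[i + 1], &ranges[i + 2],
                            (cnt - i - 2) * sizeof(ranges[0]));
                    cnt--;
            }
    }

    /* Add [base, base+size): insert only the non-overlapping pieces,
     * then merge everything that now abuts. */
    static void add_range(unsigned long base, unsigned long size)
    {
            unsigned long end = base + size;
            int i;

            for (i = 0; i < cnt && base < end; i++) {
                    unsigned long rbase = ranges[i].base;
                    unsigned long rend = rbase + ranges[i].size;

                    if (rbase >= end)
                            break;
                    if (rend <= base)
                            continue;
                    /* piece of the new area below this existing range */
                    if (rbase > base)
                            insert_range(i++, base, rbase - base);
                    base = rend < end ? rend : end;
            }
            if (base < end)
                    insert_range(i, base, end - base);
            merge_ranges();
    }

    int main(void)
    {
            add_range(0x2000, 0x1000);
            add_range(0x0000, 0x1000);
            add_range(0x1000, 0x3000);      /* overlaps; fills the gap */

            for (int i = 0; i < cnt; i++)
                    printf("[%#lx-%#lx)\n", ranges[i].base,
                           ranges[i].base + ranges[i].size);
            /* prints a single region: [0-0x4000) */
            return 0;
    }

Running it prints one region, [0-0x4000): three out-of-order,
overlapping adds collapse into a single minimal range, which is the
property the new memblock_add_region() guarantees.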
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-3-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent ed7b56a7
@@ -251,117 +251,142 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
 	return 0;
 }
 
-static long __init_memblock memblock_add_region(struct memblock_type *type,
-						phys_addr_t base, phys_addr_t size)
-{
-	phys_addr_t end = base + size;
-	int i, slot = -1;
-
-	/* First try and coalesce this MEMBLOCK with others */
-	for (i = 0; i < type->cnt; i++) {
-		struct memblock_region *rgn = &type->regions[i];
-		phys_addr_t rend = rgn->base + rgn->size;
-
-		/* Exit if there's no possible hits */
-		if (rgn->base > end || rgn->size == 0)
-			break;
-
-		/* Check if we are fully enclosed within an existing
-		 * block
-		 */
-		if (rgn->base <= base && rend >= end)
-			return 0;
-
-		/* Check if we overlap or are adjacent with the bottom
-		 * of a block.
-		 */
-		if (base < rgn->base && end >= rgn->base) {
-			/* We extend the bottom of the block down to our
-			 * base
-			 */
-			rgn->base = base;
-			rgn->size = rend - base;
-
-			/* Return if we have nothing else to allocate
-			 * (fully coalesced)
-			 */
-			if (rend >= end)
-				return 0;
-
-			/* We continue processing from the end of the
-			 * coalesced block.
-			 */
-			base = rend;
-			size = end - base;
-		}
-
-		/* Now check if we overlap or are adjacent with the
-		 * top of a block
-		 */
-		if (base <= rend && end >= rend) {
-			/* We adjust our base down to enclose the
-			 * original block and destroy it. It will be
-			 * part of our new allocation. Since we've
-			 * freed an entry, we know we won't fail
-			 * to allocate one later, so we won't risk
-			 * losing the original block allocation.
-			 */
-			size += (base - rgn->base);
-			base = rgn->base;
-			memblock_remove_region(type, i--);
-		}
-	}
-
-	/* If the array is empty, special case, replace the fake
-	 * filler region and return
-	 */
-	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
-		type->regions[0].base = base;
-		type->regions[0].size = size;
-		return 0;
-	}
-
-	/* If we are out of space, we fail. It's too late to resize the array
-	 * but then this shouldn't have happened in the first place.
-	 */
-	if (WARN_ON(type->cnt >= type->max))
-		return -1;
-
-	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
-	for (i = type->cnt - 1; i >= 0; i--) {
-		if (base < type->regions[i].base) {
-			type->regions[i+1].base = type->regions[i].base;
-			type->regions[i+1].size = type->regions[i].size;
-		} else {
-			type->regions[i+1].base = base;
-			type->regions[i+1].size = size;
-			slot = i + 1;
-			break;
-		}
-	}
-	if (base < type->regions[0].base) {
-		type->regions[0].base = base;
-		type->regions[0].size = size;
-		slot = 0;
-	}
-	type->cnt++;
-
-	/* The array is full ? Try to resize it. If that fails, we undo
-	 * our allocation and return an error
-	 */
-	if (type->cnt == type->max && memblock_double_array(type)) {
-		BUG_ON(slot < 0);
-		memblock_remove_region(type, slot);
-		return -1;
-	}
-
-	return 0;
-}
+/**
+ * memblock_merge_regions - merge neighboring compatible regions
+ * @type: memblock type to scan
+ *
+ * Scan @type and merge neighboring compatible regions.
+ */
+static void __init_memblock memblock_merge_regions(struct memblock_type *type)
+{
+	int i = 0;
+
+	/* cnt never goes below 1 */
+	while (i < type->cnt - 1) {
+		struct memblock_region *this = &type->regions[i];
+		struct memblock_region *next = &type->regions[i + 1];
+
+		if (this->base + this->size != next->base) {
+			BUG_ON(this->base + this->size > next->base);
+			i++;
+			continue;
+		}
+
+		this->size += next->size;
+		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+		type->cnt--;
+	}
+}
+
+/**
+ * memblock_insert_region - insert new memblock region
+ * @type: memblock type to insert into
+ * @idx: index for the insertion point
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * @type must already have extra room to accomodate the new region.
+ */
+static void __init_memblock memblock_insert_region(struct memblock_type *type,
+						   int idx, phys_addr_t base,
+						   phys_addr_t size)
+{
+	struct memblock_region *rgn = &type->regions[idx];
+
+	BUG_ON(type->cnt >= type->max);
+	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
+	rgn->base = base;
+	rgn->size = size;
+	type->cnt++;
+}
+
+/**
+ * memblock_add_region - add new memblock region
+ * @type: memblock type to add new region into
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Add new memblock region [@base,@base+@size) into @type.  The new region
+ * is allowed to overlap with existing ones - overlaps don't affect already
+ * existing regions.  @type is guaranteed to be minimal (all neighbouring
+ * compatible regions are merged) after the addition.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static long __init_memblock memblock_add_region(struct memblock_type *type,
+						phys_addr_t base, phys_addr_t size)
+{
+	bool insert = false;
+	phys_addr_t obase = base, end = base + size;
+	int i, nr_new;
+
+	/* special case for empty array */
+	if (type->regions[0].size == 0) {
+		WARN_ON(type->cnt != 1);
+		type->regions[0].base = base;
+		type->regions[0].size = size;
+		return 0;
+	}
+repeat:
+	/*
+	 * The following is executed twice.  Once with %false @insert and
+	 * then with %true.  The first counts the number of regions needed
+	 * to accomodate the new area.  The second actually inserts them.
+	 */
+	base = obase;
+	nr_new = 0;
+
+	for (i = 0; i < type->cnt; i++) {
+		struct memblock_region *rgn = &type->regions[i];
+		phys_addr_t rbase = rgn->base;
+		phys_addr_t rend = rbase + rgn->size;
+
+		if (rbase >= end)
+			break;
+		if (rend <= base)
+			continue;
+		/*
+		 * @rgn overlaps.  If it separates the lower part of new
+		 * area, insert that portion.
+		 */
+		if (rbase > base) {
+			nr_new++;
+			if (insert)
+				memblock_insert_region(type, i++, base,
+						       rbase - base);
+		}
+		/* area below @rend is dealt with, forget about it */
+		base = min(rend, end);
+	}
+
+	/* insert the remaining portion */
+	if (base < end) {
+		nr_new++;
+		if (insert)
+			memblock_insert_region(type, i, base, end - base);
+	}
+
+	/*
+	 * If this was the first round, resize array and repeat for actual
+	 * insertions; otherwise, merge and return.
+	 */
+	if (!insert) {
+		while (type->cnt + nr_new > type->max)
+			if (memblock_double_array(type) < 0)
+				return -ENOMEM;
+		insert = true;
+		goto repeat;
+	} else {
+		memblock_merge_regions(type);
+		return 0;
+	}
+}
 
 long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_add_region(&memblock.memory, base, size);
 }
 
 static long __init_memblock __memblock_remove(struct memblock_type *type,
...
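
As a usage note, the exported memblock_add() interface is unchanged by
this commit; what changes is that overlapping or adjacent registrations
now simply collapse into a minimal region array. A hypothetical
early-boot call sequence (addresses invented for illustration):

    /* The three ranges below overlap and abut, and end up as one
     * merged region [0x00000000, 0x02000000) in memblock.memory. */
    memblock_add(0x00000000, 0x01000000);
    memblock_add(0x00800000, 0x01000000);	/* overlaps the first */
    memblock_add(0x01800000, 0x00800000);	/* abuts the second */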