Commit ccaea152 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman

powerpc/vmemmap: Fix memory leak with vmemmap list allocation failures.

If we fail to allocate the vmemmap list, we don't keep track of the
allocated vmemmap block buf. Hence on section deactivate we skip the
vmemmap block buf free. This results in a memory leak.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200731113500.248306-1-aneesh.kumar@linux.ibm.com
parent 18102e4b
......@@ -162,7 +162,7 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
return next++;
}
static __meminit void vmemmap_list_populate(unsigned long phys,
static __meminit int vmemmap_list_populate(unsigned long phys,
unsigned long start,
int node)
{
......@@ -170,8 +170,8 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmem_back = vmemmap_list_alloc(node);
if (unlikely(!vmem_back)) {
WARN_ON(1);
return;
pr_debug("vmemap list allocation failed\n");
return -ENOMEM;
}
vmem_back->phys = phys;
......@@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmem_back->list = vmemmap_list;
vmemmap_list = vmem_back;
return 0;
}
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
......@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
bool altmap_alloc;
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
......@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
p = vmemmap_alloc_block_buf(page_size, node, altmap);
if (!p)
pr_debug("altmap block allocation failed, falling back to system memory");
else
altmap_alloc = true;
}
if (!p)
if (!p) {
p = vmemmap_alloc_block_buf(page_size, node, NULL);
altmap_alloc = false;
}
if (!p)
return -ENOMEM;
vmemmap_list_populate(__pa(p), start, node);
if (vmemmap_list_populate(__pa(p), start, node)) {
/*
* If we don't populate vmemap list, we don't have
* the ability to free the allocated vmemmap
* pages in section_deactivate. Hence free them
* here.
*/
int nr_pfns = page_size >> PAGE_SHIFT;
unsigned long page_order = get_order(page_size);
if (altmap_alloc)
vmem_altmap_free(altmap, nr_pfns);
else
free_pages((unsigned long)p, page_order);
return -ENOMEM;
}
pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment