Commit 4a654294 authored by Gerald Schaefer, committed by Martin Schwidefsky

s390/mm: fix zone calculation in arch_add_memory()

Standby (hotplug) memory should be added to ZONE_MOVABLE on s390. After
commit 199071f1 "s390/mm: make arch_add_memory() NUMA aware",
arch_add_memory() used memblock_end_of_DRAM() to find out the end of
ZONE_NORMAL and the beginning of ZONE_MOVABLE. However, commit 7f36e3e5
"memory-hotplug: add hot-added memory ranges to memblock before allocate
node_data for a node." moved the call of memblock_add_node() before
the call of arch_add_memory() in add_memory_resource(), and thus changed
the return value of memblock_end_of_DRAM() when called in
arch_add_memory(). As a result, arch_add_memory() will think that all
memory blocks should be added to ZONE_NORMAL.
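
For illustration, a condensed sketch of the ordering that triggers this, assuming
the simplified shape of add_memory_resource() after commit 7f36e3e5 (the function
name suffix and the body are hypothetical paraphrase, not the verbatim
mm/memory_hotplug.c code):

	/* Condensed, hypothetical sketch of the post-7f36e3e5 call order */
	static int add_memory_resource_sketch(int nid, u64 start, u64 size)
	{
		/* The new range is registered with memblock first ... */
		memblock_add_node(start, size, nid);
		/*
		 * ... so memblock_end_of_DRAM(), called inside the s390
		 * arch_add_memory(), already includes [start, start + size).
		 * The computed "end of ZONE_NORMAL" then covers the standby
		 * range, and the whole block lands in ZONE_NORMAL instead of
		 * ZONE_MOVABLE.
		 */
		return arch_add_memory(nid, start, size, false);
	}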

Fix this by changing the logic in arch_add_memory() so that it will
manually iterate over all zones of a given node to find out which zone
a memory block should be added to.
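
As an illustration of the new logic, a small userspace model of that zone walk
with made-up zone boundaries (2 GiB of ZONE_DMA, 14 GiB of ZONE_NORMAL, 4 KiB
pages); the zone layout and all numbers are hypothetical, only the selection
logic mirrors the patch below:

	#include <stdio.h>

	/* Toy zones: non-movable zones have fixed spans, ZONE_MOVABLE does not */
	struct toy_zone { const char *name; unsigned long start_pfn, end_pfn; int movable; };

	int main(void)
	{
		struct toy_zone zones[] = {
			{ "ZONE_DMA",     0x000000, 0x080000, 0 },	/* 0 - 2 GiB */
			{ "ZONE_NORMAL",  0x080000, 0x400000, 0 },	/* 2 - 16 GiB */
			{ "ZONE_MOVABLE", 0,        0,        1 },
		};
		unsigned long start_pfn = 0x400000;	/* standby block at 16 GiB */
		unsigned long size_pages = 0x40000;	/* 1 GiB worth of pages */
		unsigned long zs, ze, nr;
		int i;

		for (i = 0; i < 3 && size_pages; i++) {
			if (!zones[i].movable) {
				/* add range within existing zone limits, if possible */
				zs = zones[i].start_pfn;
				ze = zones[i].end_pfn;
			} else {
				/* remaining range goes to ZONE_MOVABLE */
				zs = start_pfn;
				ze = start_pfn + size_pages;
			}
			if (start_pfn < zs || start_pfn >= ze)
				continue;
			nr = (start_pfn + size_pages > ze) ? ze - start_pfn : size_pages;
			printf("adding %#lx pages at pfn %#lx to %s\n", nr, start_pfn, zones[i].name);
			start_pfn += nr;
			size_pages -= nr;
		}
		return 0;
	}

With these numbers the standby block misses both fixed zones and is added to
ZONE_MOVABLE, which is what the loop in the patch below implements against the
real pgdat->node_zones array.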
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 47ece7fe
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
-	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
-	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
-	unsigned long nr_pages;
-	int rc, zone_enum;
+	pg_data_t *pgdat = NODE_DATA(nid);
+	struct zone *zone;
+	int rc, i;
 
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
-	while (size_pages > 0) {
-		if (start_pfn < dma_end_pfn) {
-			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
-				   dma_end_pfn - start_pfn : size_pages;
-			zone_enum = ZONE_DMA;
-		} else if (start_pfn < normal_end_pfn) {
-			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
-				   normal_end_pfn - start_pfn : size_pages;
-			zone_enum = ZONE_NORMAL;
+
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		zone = pgdat->node_zones + i;
+		if (zone_idx(zone) != ZONE_MOVABLE) {
+			/* Add range within existing zone limits, if possible */
+			zone_start_pfn = zone->zone_start_pfn;
+			zone_end_pfn = zone->zone_start_pfn +
+				       zone->spanned_pages;
 		} else {
-			nr_pages = size_pages;
-			zone_enum = ZONE_MOVABLE;
+			/* Add remaining range to ZONE_MOVABLE */
+			zone_start_pfn = start_pfn;
+			zone_end_pfn = start_pfn + size_pages;
 		}
-		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
-				 start_pfn, size_pages);
+		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+			continue;
+		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+			   zone_end_pfn - start_pfn : size_pages;
+		rc = __add_pages(nid, zone, start_pfn, nr_pages);
 		if (rc)
 			break;
 		start_pfn += nr_pages;
 		size_pages -= nr_pages;
+		if (!size_pages)
+			break;
 	}
 	if (rc)
 		vmem_remove_mapping(start, size);
...