Commit dd14be92 authored by Juergen Gross's avatar Juergen Gross Committed by David Vrabel

xen: use same main loop for counting and remapping pages

Instead of having two functions for cycling through the E820 map — one
to count the pages to be remapped and a second to remap them later —
use a single function with a caller-supplied callback that is invoked
for each region to be processed. This eliminates the possibility of a
mismatch between the two loops, which showed up in certain
configurations.
Suggested-by: Ed Swierk <eswierk@skyportsystems.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent f0f39387
...@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long i = 0; unsigned long i = 0;
unsigned long n = end_pfn - start_pfn; unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0)
remap_pfn = nr_pages;
while (i < n) { while (i < n) {
unsigned long cur_pfn = start_pfn + i; unsigned long cur_pfn = start_pfn + i;
unsigned long left = n - i; unsigned long left = n - i;
...@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
return remap_pfn; return remap_pfn;
} }
static void __init xen_set_identity_and_remap(unsigned long nr_pages) static unsigned long __init xen_count_remap_pages(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
unsigned long remap_pages)
{
if (start_pfn >= nr_pages)
return remap_pages;
return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
unsigned long nr_pages, unsigned long last_val))
{ {
phys_addr_t start = 0; phys_addr_t start = 0;
unsigned long last_pfn = nr_pages; unsigned long ret_val = 0;
const struct e820entry *entry = xen_e820_map; const struct e820entry *entry = xen_e820_map;
int i; int i;
/* /*
* Combine non-RAM regions and gaps until a RAM region (or the * Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then set the 1:1 map and * end of the map) is reached, then call the provided function
* remap the memory in those non-RAM regions. * to perform its duty on the non-RAM region.
* *
* The combined non-RAM regions are rounded to a whole number * The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1 * of pages so any partial pages are accessible via the 1:1
...@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages) ...@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages)
end_pfn = PFN_UP(entry->addr); end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) if (start_pfn < end_pfn)
last_pfn = xen_set_identity_and_remap_chunk( ret_val = func(start_pfn, end_pfn, nr_pages,
start_pfn, end_pfn, nr_pages, ret_val);
last_pfn);
start = end; start = end;
} }
} }
pr_info("Released %ld page(s)\n", xen_released_pages); return ret_val;
} }
/* /*
...@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void) ...@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void)
} }
} }
/*
 * Count the number of pages that will need to be remapped: walk the E820
 * map and, for every hole between entries and for every non-RAM entry
 * below max_pfn, accumulate the page count.
 *
 * max_pfn: first PFN past the usable range; regions at or above it are
 *          not counted.
 * Returns the total number of extra (remap) pages.
 *
 * NOTE(review): assumes xen_e820_map entries are sorted by address —
 * the running end_pfn logic relies on it; confirm against map setup.
 */
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	unsigned long start_pfn, end_pfn;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/* end_pfn tracks the (page-rounded) end of the previous entry. */
	end_pfn = 0;
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		start_pfn = PFN_DOWN(entry->addr);
		/* Adjacent regions on non-page boundaries handling! */
		end_pfn = min(end_pfn, start_pfn);

		/*
		 * Entry starts past the usable range: count the final
		 * hole up to max_pfn and stop.
		 */
		if (start_pfn >= max_pfn)
			return extra + max_pfn - end_pfn;

		/* Add any holes in map to result. */
		extra += start_pfn - end_pfn;

		/* Round the entry's end up and clamp it to max_pfn. */
		end_pfn = PFN_UP(entry->addr + entry->size);
		end_pfn = min(end_pfn, max_pfn);

		/* Non-RAM regions themselves must be remapped, too. */
		if (entry->type != E820_RAM)
			extra += end_pfn - start_pfn;
	}

	return extra;
}
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{ {
struct e820entry *entry; struct e820entry *entry;
...@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void) ...@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
max_pages = xen_get_max_pages(); max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */ /* How many extra pages do we need due to remapping? */
max_pages += xen_count_remap_pages(max_pfn); max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
if (max_pages > max_pfn) if (max_pages > max_pfn)
extra_pages += max_pages - max_pfn; extra_pages += max_pages - max_pfn;
...@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void) ...@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
* Set identity map on non-RAM pages and prepare remapping the * Set identity map on non-RAM pages and prepare remapping the
* underlying RAM. * underlying RAM.
*/ */
xen_set_identity_and_remap(max_pfn); xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
return "Xen"; return "Xen";
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment