Commit 5097cdf6 authored by Juergen Gross's avatar Juergen Gross Committed by David Vrabel

xen: split counting of extra memory pages from remapping

Memory pages in the initial memory setup done by the Xen hypervisor
conflicting with the target E820 map are remapped. In order to do this
those pages are counted and remapped in xen_set_identity_and_remap().

Split the counting from the remapping operation to be able to setup
the needed memory sizes in time but doing the remap operation at a
later time. This enables us to simplify the interface to
xen_set_identity_and_remap() as the number of remapped and released
pages is no longer needed here.

Finally move the remapping further down to prepare relocating
conflicting memory contents before the memory might be clobbered by
xen_set_identity_and_remap(). This requires not destroying the Xen
E820 map when the one for the system is being constructed.
Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Konrad Rzeszutek Wilk <Konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 69632ecf
...@@ -223,7 +223,7 @@ static int __init xen_free_mfn(unsigned long mfn) ...@@ -223,7 +223,7 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails. * as a fallback if the remapping fails.
*/ */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long *released) unsigned long end_pfn, unsigned long nr_pages)
{ {
unsigned long pfn, end; unsigned long pfn, end;
int ret; int ret;
...@@ -243,7 +243,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, ...@@ -243,7 +243,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) { if (ret == 1) {
(*released)++; xen_released_pages++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break; break;
} else } else
...@@ -359,8 +359,7 @@ static void __init xen_do_set_identity_and_remap_chunk( ...@@ -359,8 +359,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
*/ */
static unsigned long __init xen_set_identity_and_remap_chunk( static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
unsigned long remap_pfn, unsigned long *released, unsigned long remap_pfn)
unsigned long *remapped)
{ {
unsigned long pfn; unsigned long pfn;
unsigned long i = 0; unsigned long i = 0;
...@@ -385,7 +384,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -385,7 +384,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (!remap_range_size) { if (!remap_range_size) {
pr_warning("Unable to find available pfn range, not remapping identity pages\n"); pr_warning("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn, xen_set_identity_and_release_chunk(cur_pfn,
cur_pfn + left, nr_pages, released); cur_pfn + left, nr_pages);
break; break;
} }
/* Adjust size to fit in current e820 RAM region */ /* Adjust size to fit in current e820 RAM region */
...@@ -397,7 +396,6 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -397,7 +396,6 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Update variables to reflect new mappings. */ /* Update variables to reflect new mappings. */
i += size; i += size;
remap_pfn += size; remap_pfn += size;
*remapped += size;
} }
/* /*
...@@ -412,14 +410,11 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -412,14 +410,11 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
return remap_pfn; return remap_pfn;
} }
static void __init xen_set_identity_and_remap(unsigned long nr_pages, static void __init xen_set_identity_and_remap(unsigned long nr_pages)
unsigned long *released, unsigned long *remapped)
{ {
phys_addr_t start = 0; phys_addr_t start = 0;
unsigned long last_pfn = nr_pages; unsigned long last_pfn = nr_pages;
const struct e820entry *entry = xen_e820_map; const struct e820entry *entry = xen_e820_map;
unsigned long num_released = 0;
unsigned long num_remapped = 0;
int i; int i;
/* /*
...@@ -445,16 +440,12 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages, ...@@ -445,16 +440,12 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages,
if (start_pfn < end_pfn) if (start_pfn < end_pfn)
last_pfn = xen_set_identity_and_remap_chunk( last_pfn = xen_set_identity_and_remap_chunk(
start_pfn, end_pfn, nr_pages, start_pfn, end_pfn, nr_pages,
last_pfn, &num_released, last_pfn);
&num_remapped);
start = end; start = end;
} }
} }
*released = num_released; pr_info("Released %ld page(s)\n", xen_released_pages);
*remapped = num_remapped;
pr_info("Released %ld page(s)\n", num_released);
} }
/* /*
...@@ -560,6 +551,28 @@ static void __init xen_ignore_unusable(void) ...@@ -560,6 +551,28 @@ static void __init xen_ignore_unusable(void)
} }
} }
/*
 * Count how many extra pages the remap step will need: every non-RAM
 * page below max_pfn in the hypervisor-supplied E820 map is counted,
 * since each such page will be remapped elsewhere later.
 *
 * Entries are assumed sorted by address (scan stops at the first entry
 * starting at or above max_pfn) — NOTE(review): verify against the map
 * producer.
 */
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	const struct e820entry *entry;
	unsigned long extra = 0;
	int i;

	for (i = 0; i < xen_e820_map_entries; i++) {
		unsigned long first, last;

		entry = xen_e820_map + i;
		first = PFN_DOWN(entry->addr);
		last = PFN_UP(entry->addr + entry->size);

		/* Everything from here on lies above the usable range. */
		if (first >= max_pfn)
			break;
		/* RAM entries stay in place; nothing to remap. */
		if (entry->type == E820_RAM)
			continue;
		/* Clamp partial overlap at the top of the range. */
		if (last >= max_pfn)
			last = max_pfn;

		extra += last - first;
	}

	return extra;
}
/* /*
* Reserve Xen mfn_list. * Reserve Xen mfn_list.
* See comment above "struct start_info" in <xen/interface/xen.h> * See comment above "struct start_info" in <xen/interface/xen.h>
...@@ -601,12 +614,12 @@ static void __init xen_reserve_xen_mfnlist(void) ...@@ -601,12 +614,12 @@ static void __init xen_reserve_xen_mfnlist(void)
char * __init xen_memory_setup(void) char * __init xen_memory_setup(void)
{ {
unsigned long max_pfn = xen_start_info->nr_pages; unsigned long max_pfn = xen_start_info->nr_pages;
phys_addr_t mem_end; phys_addr_t mem_end, addr, size, chunk_size;
u32 type;
int rc; int rc;
struct xen_memory_map memmap; struct xen_memory_map memmap;
unsigned long max_pages; unsigned long max_pages;
unsigned long extra_pages = 0; unsigned long extra_pages = 0;
unsigned long remapped_pages;
int i; int i;
int op; int op;
...@@ -653,15 +666,8 @@ char * __init xen_memory_setup(void) ...@@ -653,15 +666,8 @@ char * __init xen_memory_setup(void)
if (max_pages > max_pfn) if (max_pages > max_pfn)
extra_pages += max_pages - max_pfn; extra_pages += max_pages - max_pfn;
/* /* How many extra pages do we need due to remapping? */
* Set identity map on non-RAM pages and prepare remapping the extra_pages += xen_count_remap_pages(max_pfn);
* underlying RAM.
*/
xen_set_identity_and_remap(max_pfn, &xen_released_pages,
&remapped_pages);
extra_pages += xen_released_pages;
extra_pages += remapped_pages;
/* /*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
...@@ -677,29 +683,35 @@ char * __init xen_memory_setup(void) ...@@ -677,29 +683,35 @@ char * __init xen_memory_setup(void)
extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages); extra_pages);
i = 0; i = 0;
addr = xen_e820_map[0].addr;
size = xen_e820_map[0].size;
while (i < xen_e820_map_entries) { while (i < xen_e820_map_entries) {
phys_addr_t addr = xen_e820_map[i].addr; chunk_size = size;
phys_addr_t size = xen_e820_map[i].size; type = xen_e820_map[i].type;
u32 type = xen_e820_map[i].type;
if (type == E820_RAM) { if (type == E820_RAM) {
if (addr < mem_end) { if (addr < mem_end) {
size = min(size, mem_end - addr); chunk_size = min(size, mem_end - addr);
} else if (extra_pages) { } else if (extra_pages) {
size = min(size, PFN_PHYS(extra_pages)); chunk_size = min(size, PFN_PHYS(extra_pages));
extra_pages -= PFN_DOWN(size); extra_pages -= PFN_DOWN(chunk_size);
xen_add_extra_mem(addr, size); xen_add_extra_mem(addr, chunk_size);
xen_max_p2m_pfn = PFN_DOWN(addr + size); xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
} else } else
type = E820_UNUSABLE; type = E820_UNUSABLE;
} }
xen_align_and_add_e820_region(addr, size, type); xen_align_and_add_e820_region(addr, chunk_size, type);
xen_e820_map[i].addr += size; addr += chunk_size;
xen_e820_map[i].size -= size; size -= chunk_size;
if (xen_e820_map[i].size == 0) if (size == 0) {
i++; i++;
if (i < xen_e820_map_entries) {
addr = xen_e820_map[i].addr;
size = xen_e820_map[i].size;
}
}
} }
/* /*
...@@ -709,7 +721,7 @@ char * __init xen_memory_setup(void) ...@@ -709,7 +721,7 @@ char * __init xen_memory_setup(void)
* PFNs above MAX_P2M_PFN are considered identity mapped as * PFNs above MAX_P2M_PFN are considered identity mapped as
* well. * well.
*/ */
set_phys_range_identity(xen_e820_map[i - 1].addr / PAGE_SIZE, ~0ul); set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
/* /*
* In domU, the ISA region is normal, usable memory, but we * In domU, the ISA region is normal, usable memory, but we
...@@ -723,6 +735,12 @@ char * __init xen_memory_setup(void) ...@@ -723,6 +735,12 @@ char * __init xen_memory_setup(void)
xen_reserve_xen_mfnlist(); xen_reserve_xen_mfnlist();
/*
* Set identity map on non-RAM pages and prepare remapping the
* underlying RAM.
*/
xen_set_identity_and_remap(max_pfn);
return "Xen"; return "Xen";
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment