Commit 9ec23a7f authored by Ian Campbell, committed by Jeremy Fitzhardinge

xen: do not release any memory under 1M in domain 0

We already deliberately setup a 1-1 P2M for the region up to 1M in
order to allow code which assumes this region is already mapped to
work without having to convert everything to ioremap.

Domain 0 should not return any apparently unused memory regions
(reserved or otherwise) in this region to Xen since the e820 may not
accurately reflect what the BIOS has stashed in this region.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
parent 6903591f
...@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, ...@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
const struct e820map *e820) const struct e820map *e820)
{ {
phys_addr_t max_addr = PFN_PHYS(max_pfn); phys_addr_t max_addr = PFN_PHYS(max_pfn);
phys_addr_t last_end = 0; phys_addr_t last_end = ISA_END_ADDRESS;
unsigned long released = 0; unsigned long released = 0;
int i; int i;
/* Free any unused memory above the low 1Mbyte. */
for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
phys_addr_t end = e820->map[i].addr; phys_addr_t end = e820->map[i].addr;
end = min(max_addr, end); end = min(max_addr, end);
if (last_end < end)
released += xen_release_chunk(last_end, end); released += xen_release_chunk(last_end, end);
last_end = e820->map[i].addr + e820->map[i].size; last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
} }
if (last_end < max_addr) if (last_end < max_addr)
...@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void) ...@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
XENMEM_memory_map; XENMEM_memory_map;
rc = HYPERVISOR_memory_op(op, &memmap); rc = HYPERVISOR_memory_op(op, &memmap);
if (rc == -ENOSYS) { if (rc == -ENOSYS) {
BUG_ON(xen_initial_domain());
memmap.nr_entries = 1; memmap.nr_entries = 1;
map[0].addr = 0ULL; map[0].addr = 0ULL;
map[0].size = mem_end; map[0].size = mem_end;
...@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void) ...@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
} }
/* /*
* Even though this is normal, usable memory under Xen, reserve * In domU, the ISA region is normal, usable memory, but we
* ISA memory anyway because too many things think they can poke * reserve ISA memory anyway because too many things poke
* about in there. * about in there.
* *
* In a dom0 kernel, this region is identity mapped with the * In Dom0, the host E820 information can leave gaps in the
* hardware ISA area, so it really is out of bounds. * ISA range, which would cause us to release those pages. To
* avoid this, we unconditionally reserve them here.
*/ */
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED); E820_RESERVED);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment