Commit 1f0324ca authored by Linus Torvalds

Merge branch 'stable/bug-fixes-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/bug-fixes-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/setup: Route halt operations to safe_halt pvop.
  xen/e820: Guard against E820_RAM not having page-aligned size or start.
  xen/p2m: Mark INVALID_P2M_ENTRY the mfn_list past max_pfn.
parents 0b6bb66d 23febedd
@@ -241,20 +241,14 @@ void __init xen_build_dynamic_phys_to_machine(void)
 		 * As long as the mfn_list has enough entries to completely
 		 * fill a p2m page, pointing into the array is ok. But if
 		 * not the entries beyond the last pfn will be undefined.
-		 * And guessing that the 'what-ever-there-is' does not take it
-		 * too kindly when changing it to invalid markers, a new page
-		 * is allocated, initialized and filled with the valid part.
 		 */
 		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
 			unsigned long p2midx;
-			unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_init(p2m);
 
-			for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
-				p2m[p2midx] = mfn_list[pfn + p2midx];
-			}
-			p2m_top[topidx][mididx] = p2m;
-		} else
-			p2m_top[topidx][mididx] = &mfn_list[pfn];
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
+		p2m_top[topidx][mididx] = &mfn_list[pfn];
 	}
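The hunk above drops the old approach of copying the valid tail of mfn_list into a freshly allocated brk page; instead, the mfn_list slots between max_pfn and the end of the last P2M_PER_PAGE-sized chunk are overwritten with INVALID_P2M_ENTRY, so p2m_top can point straight into mfn_list without exposing undefined entries. A minimal user-space sketch of that tail-marking loop, reusing the identifiers from the patch but with a made-up chunk size and made-up max_pfn/mfn values:

/*
 * Toy model of the tail-marking logic from the hunk above.  P2M_PER_PAGE
 * and INVALID_P2M_ENTRY mirror the kernel names; the small chunk size and
 * the sample values are invented purely for illustration.
 */
#include <stdio.h>

#define P2M_PER_PAGE      8UL     /* kernel value: PAGE_SIZE / sizeof(unsigned long) */
#define INVALID_P2M_ENTRY (~0UL)

int main(void)
{
	unsigned long mfn_list[2 * P2M_PER_PAGE];
	unsigned long max_pfn = 11;   /* deliberately not a multiple of P2M_PER_PAGE */
	unsigned long pfn, p2midx;

	for (pfn = 0; pfn < max_pfn; pfn++)
		mfn_list[pfn] = 0x1000 + pfn;        /* pretend-valid mfns */

	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		/* Same guard as the patch: the last chunk is only partially
		 * backed by valid entries, so mark the rest invalid rather
		 * than leaving undefined values past max_pfn. */
		if (pfn + P2M_PER_PAGE > max_pfn) {
			p2midx = max_pfn % P2M_PER_PAGE;
			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
		}
	}

	for (pfn = 0; pfn < 2 * P2M_PER_PAGE; pfn++)
		printf("pfn %2lu -> %#lx\n", pfn, mfn_list[pfn]);
	return 0;
}

With these toy values, slots 0-10 keep their pretend mfns while slots 11-15 come out as INVALID_P2M_ENTRY (all bits set).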
@@ -179,8 +179,13 @@ char * __init xen_memory_setup(void)
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
+
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+		end = map[i].addr + map[i].size;
 
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
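The added guard trims a RAM entry so that map[i].addr + map[i].size falls on a page boundary before the entry is fed into the rest of the loop. A standalone sketch of just that arithmetic; PAGE_SIZE and the sample addr/size are illustrative values, not taken from a real E820 map:

/*
 * Toy model of the end-alignment clamp added above.  PAGE_SIZE and the
 * sample addr/size are invented for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long addr = 0x1000;   /* start of a RAM entry (page aligned) */
	unsigned long long size = 0x1800;   /* entry ends mid-page at 0x2800 */

	/* Same arithmetic as the patch: drop the partial page at the end
	 * so that addr + size becomes a multiple of PAGE_SIZE. */
	size -= (size + addr) % PAGE_SIZE;

	printf("size %#llx, end %#llx\n", size, addr + size);
	return 0;
}

Here the clamp shrinks the entry from 0x1800 to 0x1000 bytes, moving its end from 0x2800 down to the page boundary at 0x2000.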
@@ -350,6 +355,7 @@ void __init xen_arch_setup(void)
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }