Commit cf0923ea authored by Jeremy Fitzhardinge's avatar Jeremy Fitzhardinge Committed by Thomas Gleixner

xen: efficiently support a holey p2m table

When using sparsemem and memory hotplug, the kernel's pseudo-physical
address space can be discontiguous.  Previously this was dealt with by
having the upper parts of the radix tree stubbed off.  Unfortunately,
this is incompatible with save/restore, which requires a complete p2m
table.

The solution is to have a special distinguished all-invalid p2m leaf
page, which we can point all the hole areas at.  This allows the tools
to see a complete p2m table, but it only costs a page for all memory
holes.

It also simplifies the code since it removes a few special cases.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 8006ec3e
...@@ -57,8 +57,17 @@ ...@@ -57,8 +57,17 @@
#include "mmu.h" #include "mmu.h"
#define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) #define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
static unsigned long *p2m_top[MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE]; /* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
__attribute__((section(".data.page_aligned"))) =
{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES]
__attribute__((section(".data.page_aligned"))) =
{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
static inline unsigned p2m_top_index(unsigned long pfn) static inline unsigned p2m_top_index(unsigned long pfn)
{ {
...@@ -92,9 +101,6 @@ unsigned long get_phys_to_machine(unsigned long pfn) ...@@ -92,9 +101,6 @@ unsigned long get_phys_to_machine(unsigned long pfn)
return INVALID_P2M_ENTRY; return INVALID_P2M_ENTRY;
topidx = p2m_top_index(pfn); topidx = p2m_top_index(pfn);
if (p2m_top[topidx] == NULL)
return INVALID_P2M_ENTRY;
idx = p2m_index(pfn); idx = p2m_index(pfn);
return p2m_top[topidx][idx]; return p2m_top[topidx][idx];
} }
...@@ -110,7 +116,7 @@ static void alloc_p2m(unsigned long **pp) ...@@ -110,7 +116,7 @@ static void alloc_p2m(unsigned long **pp)
for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++) for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
p[i] = INVALID_P2M_ENTRY; p[i] = INVALID_P2M_ENTRY;
if (cmpxchg(pp, NULL, p) != NULL) if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
free_page((unsigned long)p); free_page((unsigned long)p);
} }
...@@ -129,7 +135,7 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn) ...@@ -129,7 +135,7 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
} }
topidx = p2m_top_index(pfn); topidx = p2m_top_index(pfn);
if (p2m_top[topidx] == NULL) { if (p2m_top[topidx] == p2m_missing) {
/* no need to allocate a page to store an invalid entry */ /* no need to allocate a page to store an invalid entry */
if (mfn == INVALID_P2M_ENTRY) if (mfn == INVALID_P2M_ENTRY)
return; return;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment