Commit 9a4029fd authored by Jeremy Fitzhardinge, committed by Jeremy Fitzhardinge

xen: ignore RW mapping of RO pages in pagetable_init

When setting up the initial pagetable, which includes mappings of all
low physical memory, ignore a mapping which tries to set the RW bit on
an RO pte.  An RO pte indicates a page which is part of the current
pagetable, and so it cannot be allowed to become RW.

Once xen_pagetable_setup_done is called, set_pte reverts to its normal
behaviour.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
Cc: ebiederm@xmission.com (Eric W. Biederman)
parent f4f97b3e
...@@ -505,7 +505,7 @@ static void xen_write_cr3(unsigned long cr3) ...@@ -505,7 +505,7 @@ static void xen_write_cr3(unsigned long cr3)
/* Early in boot, while setting up the initial pagetable, assume /* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */ everything is pinned. */
static void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn) static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
{ {
BUG_ON(mem_map); /* should only be used early */ BUG_ON(mem_map); /* should only be used early */
make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
...@@ -557,10 +557,32 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type) ...@@ -557,10 +557,32 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
} }
#endif #endif
/*
 * Clamp the RW bit of a new pte value against the pte currently in the
 * slot.
 *
 * If the slot already holds a present mapping, the returned pte may only
 * have _PAGE_RW set if the existing pte also had it set.  The mask
 * expression evaluates to all-ones when the old pte is RW (leaving the
 * new value untouched), and to ~_PAGE_RW when the old pte is RO, so the
 * AND strips RW from the new value exactly when the existing mapping was
 * read-only.
 *
 * Purpose (per the commit message): pagetable pages are mapped RO during
 * init, and this prevents the low-memory identity mapping from remapping
 * them RW.
 */
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
/*
 * set_pte variant installed only while the initial pagetables are being
 * built: filters the incoming pte through mask_rw_pte() so that a page
 * already mapped read-only (i.e. a live pagetable page) can never be
 * remapped read-write, then defers to the normal xen_set_pte().
 */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	xen_set_pte(ptep, mask_rw_pte(ptep, pte));
}
static __init void xen_pagetable_setup_start(pgd_t *base) static __init void xen_pagetable_setup_start(pgd_t *base)
{ {
pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base; pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
/* special set_pte for pagetable initialization */
paravirt_ops.set_pte = xen_set_pte_init;
init_mm.pgd = base; init_mm.pgd = base;
/* /*
* copy top-level of Xen-supplied pagetable into place. For * copy top-level of Xen-supplied pagetable into place. For
...@@ -607,6 +629,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base) ...@@ -607,6 +629,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
/* This will work as long as patching hasn't happened yet /* This will work as long as patching hasn't happened yet
(which it hasn't) */ (which it hasn't) */
paravirt_ops.alloc_pt = xen_alloc_pt; paravirt_ops.alloc_pt = xen_alloc_pt;
paravirt_ops.set_pte = xen_set_pte;
if (!xen_feature(XENFEAT_auto_translated_physmap)) { if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* /*
...@@ -745,7 +768,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = { ...@@ -745,7 +768,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
.kmap_atomic_pte = xen_kmap_atomic_pte, .kmap_atomic_pte = xen_kmap_atomic_pte,
#endif #endif
.set_pte = xen_set_pte, .set_pte = NULL, /* see xen_pagetable_setup_* */
.set_pte_at = xen_set_pte_at, .set_pte_at = xen_set_pte_at,
.set_pmd = xen_set_pmd, .set_pmd = xen_set_pmd,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment