Commit 3843fc25 authored by Jeremy Fitzhardinge, committed by Thomas Gleixner

xen: remove support for non-PAE 32-bit

Non-PAE operation has been deprecated in Xen for a while, and is
rarely tested or used.  xen-unstable has now officially dropped
non-PAE support.  Since Xen/pvops' non-PAE support has also been
broken for a while, we may as well completely drop it altogether.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 78b58e54
@@ -6,7 +6,7 @@ config XEN
     bool "Xen guest support"
     select PARAVIRT
     depends on X86_32
-    depends on X86_CMPXCHG && X86_TSC && !(X86_VISWS || X86_VOYAGER)
+    depends on X86_CMPXCHG && X86_TSC && X86_PAE && !(X86_VISWS || X86_VOYAGER)
     help
       This is the Linux Xen port. Enabling this will allow the
       kernel to boot in a paravirtualized environment under the
...
@@ -785,20 +785,18 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
     pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+    int i;
     /* special set_pte for pagetable initialization */
     pv_mmu_ops.set_pte = xen_set_pte_init;
     init_mm.pgd = base;
     /*
-     * copy top-level of Xen-supplied pagetable into place.  For
-     * !PAE we can use this as-is, but for PAE it is a stand-in
-     * while we copy the pmd pages.
+     * copy top-level of Xen-supplied pagetable into place.  This
+     * is a stand-in while we copy the pmd pages.
      */
     memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-    if (PTRS_PER_PMD > 1) {
-        int i;
-        /*
-         * For PAE, need to allocate new pmds, rather than
-         * share Xen's, since Xen doesn't like pmd's being
+    /*
+     * For PAE, need to allocate new pmds, rather than
+     * share Xen's, since Xen doesn't like pmd's being
@@ -817,7 +815,6 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
-        } else
-            pgd_clear(&base[i]);
-    }
-    }
+    } else
+        pgd_clear(&base[i]);
+    }
     /* make sure zero_page is mapped RO so we can use it in pagetables */
     make_lowmem_page_readonly(empty_zero_page);
@@ -873,17 +870,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
     /* Actually pin the pagetable down, but we can't set PG_pinned
        yet because the page structures don't exist yet. */
-    {
-        unsigned level;
-#ifdef CONFIG_X86_PAE
-        level = MMUEXT_PIN_L3_TABLE;
-#else
-        level = MMUEXT_PIN_L2_TABLE;
-#endif
-        pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
-    }
+    pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(base)));
 }
 /* This is called once we have the cpu_possible_map */
@@ -1093,7 +1080,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
     .make_pte = xen_make_pte,
     .make_pgd = xen_make_pgd,
-#ifdef CONFIG_X86_PAE
     .set_pte_atomic = xen_set_pte_atomic,
     .set_pte_present = xen_set_pte_at,
     .set_pud = xen_set_pud,
@@ -1102,7 +1088,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
     .make_pmd = xen_make_pmd,
     .pmd_val = xen_pmd_val,
-#endif /* PAE */
     .activate_mm = xen_activate_mm,
     .dup_mmap = xen_dup_mmap,
...
@@ -222,7 +222,7 @@ pmdval_t xen_pmd_val(pmd_t pmd)
     ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
     return ret;
 }
-#ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
     struct multicall_space mcs;
@@ -272,12 +272,6 @@ pmd_t xen_make_pmd(pmdval_t pmd)
     return native_make_pmd(pmd);
 }
-#else /* !PAE */
-void xen_set_pte(pte_t *ptep, pte_t pte)
-{
-    *ptep = pte;
-}
-#endif /* CONFIG_X86_PAE */
 /*
   (Yet another) pagetable walker. This one is intended for pinning a
@@ -430,8 +424,6 @@ static int pin_page(struct page *page, enum pt_level level)
    read-only, and can be pinned. */
 void xen_pgd_pin(pgd_t *pgd)
 {
-    unsigned level;
     xen_mc_batch();
     if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
@@ -441,14 +433,7 @@ void xen_pgd_pin(pgd_t *pgd)
         xen_mc_batch();
     }
-#ifdef CONFIG_X86_PAE
-    level = MMUEXT_PIN_L3_TABLE;
-#else
-    level = MMUEXT_PIN_L2_TABLE;
-#endif
-    xen_do_pin(level, PFN_DOWN(__pa(pgd)));
+    xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
     xen_mc_issue(0);
 }
...
@@ -37,14 +37,13 @@ void xen_exit_mmap(struct mm_struct *mm);
 void xen_pgd_pin(pgd_t *pgd);
 //void xen_pgd_unpin(pgd_t *pgd);
-#ifdef CONFIG_X86_PAE
-unsigned long long xen_pte_val(pte_t);
-unsigned long long xen_pmd_val(pmd_t);
-unsigned long long xen_pgd_val(pgd_t);
+pteval_t xen_pte_val(pte_t);
+pmdval_t xen_pmd_val(pmd_t);
+pgdval_t xen_pgd_val(pgd_t);

-pte_t xen_make_pte(unsigned long long);
-pmd_t xen_make_pmd(unsigned long long);
-pgd_t xen_make_pgd(unsigned long long);
+pte_t xen_make_pte(pteval_t);
+pmd_t xen_make_pmd(pmdval_t);
+pgd_t xen_make_pgd(pgdval_t);
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pteval);
@@ -53,15 +52,4 @@ void xen_set_pud(pud_t *ptr, pud_t val);
 void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_pmd_clear(pmd_t *pmdp);
-#else
-unsigned long xen_pte_val(pte_t);
-unsigned long xen_pmd_val(pmd_t);
-unsigned long xen_pgd_val(pgd_t);
-pte_t xen_make_pte(unsigned long);
-pmd_t xen_make_pmd(unsigned long);
-pgd_t xen_make_pgd(unsigned long);
-#endif
 #endif /* _XEN_MMU_H */
@@ -30,11 +30,7 @@ ENTRY(hypercall_page)
     ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long startup_xen)
     ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long hypercall_page)
     ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb")
-#ifdef CONFIG_X86_PAE
     ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
-#else
-    ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "no")
-#endif
     ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
 #endif /*CONFIG_XEN */
@@ -150,13 +150,9 @@ static inline pte_t __pte_ma(pteval_t x)
     return (pte_t) { .pte = x };
 }
-#ifdef CONFIG_X86_PAE
 #define pmd_val_ma(v) ((v).pmd)
 #define pud_val_ma(v) ((v).pgd.pgd)
 #define __pmd_ma(x) ((pmd_t) { (x) } )
-#else /* !X86_PAE */
-#define pmd_val_ma(v) ((v).pud.pgd.pgd)
-#endif /* CONFIG_X86_PAE */
 #define pgd_val_ma(x) ((x).pgd)
...