Commit 131ca9c0 authored by Brian Gerst's avatar Brian Gerst Committed by Patrick Mochel

[PATCH] i386 mm init cleanup part 1

This revised patch starts untangling the mess in arch/i386/mm/init.c
- Pull setting bits in cr4 out of the loop
- Make __PAGE_KERNEL a variable and cache the global bit there.
- New pfn_pmd() for large pages.
parent 81d3703c
...@@ -174,3 +174,5 @@ EXPORT_SYMBOL(atomic_dec_and_lock); ...@@ -174,3 +174,5 @@ EXPORT_SYMBOL(atomic_dec_and_lock);
extern int is_sony_vaio_laptop; extern int is_sony_vaio_laptop;
EXPORT_SYMBOL(is_sony_vaio_laptop); EXPORT_SYMBOL(is_sony_vaio_laptop);
EXPORT_SYMBOL(__PAGE_KERNEL);
...@@ -177,6 +177,8 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t ...@@ -177,6 +177,8 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t
} }
} }
unsigned long __PAGE_KERNEL = _PAGE_KERNEL;
static void __init pagetable_init (void) static void __init pagetable_init (void)
{ {
unsigned long vaddr, end; unsigned long vaddr, end;
...@@ -196,6 +198,14 @@ static void __init pagetable_init (void) ...@@ -196,6 +198,14 @@ static void __init pagetable_init (void)
for (i = 0; i < PTRS_PER_PGD; i++) for (i = 0; i < PTRS_PER_PGD; i++)
set_pgd(pgd_base + i, __pgd(1 + __pa(empty_zero_page))); set_pgd(pgd_base + i, __pgd(1 + __pa(empty_zero_page)));
#endif #endif
if (cpu_has_pse) {
set_in_cr4(X86_CR4_PSE);
}
if (cpu_has_pge) {
set_in_cr4(X86_CR4_PGE);
__PAGE_KERNEL |= _PAGE_GLOBAL;
}
i = __pgd_offset(PAGE_OFFSET); i = __pgd_offset(PAGE_OFFSET);
pgd = pgd_base + i; pgd = pgd_base + i;
...@@ -216,17 +226,7 @@ static void __init pagetable_init (void) ...@@ -216,17 +226,7 @@ static void __init pagetable_init (void)
if (end && (vaddr >= end)) if (end && (vaddr >= end))
break; break;
if (cpu_has_pse) { if (cpu_has_pse) {
unsigned long __pe; set_pmd(pmd, pfn_pmd(__pa(vaddr) >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
set_in_cr4(X86_CR4_PSE);
boot_cpu_data.wp_works_ok = 1;
__pe = _KERNPG_TABLE + _PAGE_PSE + __pa(vaddr);
/* Make it "global" too if supported */
if (cpu_has_pge) {
set_in_cr4(X86_CR4_PGE);
__pe += _PAGE_GLOBAL;
}
set_pmd(pmd, __pmd(__pe));
continue; continue;
} }
...@@ -358,14 +358,17 @@ static int do_test_wp_bit(unsigned long vaddr); ...@@ -358,14 +358,17 @@ static int do_test_wp_bit(unsigned long vaddr);
void __init test_wp_bit(void) void __init test_wp_bit(void)
{ {
/*
* Ok, all PSE-capable CPUs are definitely handling the WP bit right.
*/
const unsigned long vaddr = PAGE_OFFSET; const unsigned long vaddr = PAGE_OFFSET;
pgd_t *pgd; pgd_t *pgd;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte, old_pte; pte_t *pte, old_pte;
if (cpu_has_pse) {
/* Ok, all PSE-capable CPUs are definitely handling the WP bit right. */
boot_cpu_data.wp_works_ok = 1;
return;
}
printk("Checking if this processor honours the WP bit even in supervisor mode... "); printk("Checking if this processor honours the WP bit even in supervisor mode... ");
pgd = swapper_pg_dir + __pgd_offset(vaddr); pgd = swapper_pg_dir + __pgd_offset(vaddr);
......
...@@ -60,5 +60,6 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) ...@@ -60,5 +60,6 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
#define pte_none(x) (!(x).pte_low) #define pte_none(x) (!(x).pte_low)
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* _I386_PGTABLE_2LEVEL_H */ #endif /* _I386_PGTABLE_2LEVEL_H */
...@@ -99,4 +99,9 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) ...@@ -99,4 +99,9 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
return pte; return pte;
} }
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
return __pmd(((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
}
#endif /* _I386_PGTABLE_3LEVEL_H */ #endif /* _I386_PGTABLE_3LEVEL_H */
...@@ -132,27 +132,18 @@ extern void pgtable_cache_init(void); ...@@ -132,27 +132,18 @@ extern void pgtable_cache_init(void);
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \ #define _PAGE_KERNEL \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED) extern unsigned long __PAGE_KERNEL;
#define __PAGE_KERNEL_RO \ #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
# define MAKE_GLOBAL(x) \
({ \ #define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
pgprot_t __ret; \ #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
\ #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
if (cpu_has_pge) \ #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
__ret = __pgprot((x) | _PAGE_GLOBAL); \
else \
__ret = __pgprot(x); \
__ret; \
})
#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
/* /*
* The i386 can't do page protection for execute, and considers that * The i386 can't do page protection for execute, and considers that
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment