Commit 88ae31fc authored by Russell King

Update PTE functions to be in line with 2.5.5.

This is a minimal "get it working again" patch; there are plans afoot
to re-jig the page table code to work better with Ingo Molnar's
changes.  These same plans also allow the ARM page tables to fit
into Rik van Riel's rmap significantly better.

(We're currently abusing the struct page * returned from pte_alloc_one,
treating it as if it were the same as a pte_t *)
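To make the new pairing concrete, here is a condensed sketch of the
get_pgd_slow() change below (the function name is ours; the identifiers
are the patch's; error handling, locking and the rest of the pgd setup
are omitted):

	static void sketch_copy_pte0(struct mm_struct *mm, pmd_t *new_pmd,
				     pmd_t *init_pmd)
	{
		pte_t *new_pte, *init_pte;

		new_pte = pte_alloc_map(mm, new_pmd, 0);	/* was pte_alloc() */
		if (!new_pte)
			return;
		init_pte = pte_offset_map_nested(init_pmd, 0);	/* was pte_offset() */
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);	/* every map is now paired... */
		pte_unmap(new_pte);		/* ...with an unmap */
	}

Both unmaps expand to nothing on ARM for now, but keeping them balanced
is what the 2.5.5 interfaces expect of every architecture.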
parent ce1e6b9f
@@ -682,7 +682,6 @@ comment 'Kernel hacking'
bool 'Compile kernel without frame pointer' CONFIG_NO_FRAME_POINTER
bool 'Verbose user fault messages' CONFIG_DEBUG_USER
bool 'Include debugging information in kernel binary' CONFIG_DEBUG_INFO
dep_bool 'Disable pgtable cache' CONFIG_NO_PGT_CACHE $CONFIG_CPU_26
bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
dep_bool ' Debug memory allocations' CONFIG_DEBUG_SLAB $CONFIG_DEBUG_KERNEL
@@ -167,10 +167,6 @@ EXPORT_SYMBOL(__virt_to_bus);
#endif
#ifndef __bus_to_virt__is_a_macro
EXPORT_SYMBOL(__bus_to_virt);
#endif
#ifndef CONFIG_NO_PGT_CACHE
EXPORT_SYMBOL(quicklists);
#endif
/* string / mem functions */
@@ -95,9 +95,6 @@ void cpu_idle(void)
idle();
leds_event(led_idle_end);
schedule();
#ifndef CONFIG_NO_PGT_CACHE
check_pgt_cache();
#endif
}
}
@@ -78,15 +78,16 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc(mm, new_pmd, 0);
new_pte = pte_alloc_map(mm, new_pmd, 0);
if (!new_pte)
goto no_pte;
init_pgd = pgd_offset_k(0);
init_pmd = pmd_offset(init_pgd, 0);
init_pte = pte_offset(init_pmd, 0);
init_pte = pte_offset_map_nested(init_pmd, 0);
set_pte(new_pte, *init_pte);
pte_unmap_nested(init_pte);
pte_unmap(new_pte);
/*
* most of the page table entries are zeroed
@@ -151,7 +151,7 @@ static void adjust_pte(struct vm_area_struct *vma, unsigned long address)
if (pmd_bad(*pmd))
goto bad_pmd;
pte = pte_offset(pmd, address);
pte = pte_offset_map(pmd, address);
entry = *pte;
/*
@@ -164,6 +164,7 @@ static void adjust_pte(struct vm_area_struct *vma, unsigned long address)
set_pte(pte, entry);
flush_tlb_page(vma, address);
}
pte_unmap(pte);
return;
bad_pgd:
@@ -83,10 +83,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
}
pte = pte_offset(pmd, addr);
#ifndef CONFIG_HIGHMEM
/* We must not map this if we have highmem enabled */
pte = pte_offset_map(pmd, addr);
printk(", *pte = %08lx", pte_val(*pte));
#ifdef CONFIG_CPU_32
printk(", *ppte = %08lx", pte_val(pte[-PTRS_PER_PTE]));
#endif
pte_unmap(pte);
#endif
} while(0);
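The *ppte line above works because each ARM PTE page carries the
hardware table immediately below the Linux one (see the comment in the
pgalloc.h hunk further down), so the hardware entry is always reachable
at pte[-PTRS_PER_PTE]. Illustratively, not part of the patch, with
pmd/addr as in show_pte() and all checks omitted:

	pte_t *pte = pte_offset_map(pmd, addr);
	pte_t linux_pte = *pte;			/* Linux software PTE */
	pte_t hw_pte = pte[-PTRS_PER_PTE];	/* hardware PTE (CONFIG_CPU_32) */
	pte_unmap(pte);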
@@ -64,38 +64,6 @@ static struct meminfo meminfo __initdata = { 0, };
*/
struct page *empty_zero_page;
#ifndef CONFIG_NO_PGT_CACHE
struct pgtable_cache_struct quicklists;
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
if(pgtable_cache_size > high) {
do {
if(pgd_quicklist) {
free_pgd_slow(get_pgd_fast());
freed++;
}
if(pmd_quicklist) {
pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
freed++;
}
if(pte_quicklist) {
pte_free_slow(pte_alloc_one_fast(NULL, 0));
freed++;
}
} while(pgtable_cache_size > low);
}
return freed;
}
#else
int do_check_pgt_cache(int low, int high)
{
return 0;
}
#endif
/* This is currently broken
* PG_skip is used on sparc/sparc64 architectures to "skip" certain
* parts of the address space.
@@ -145,9 +113,6 @@ void show_mem(void)
printk("%d slab pages\n", slab);
printk("%d pages shared\n", shared);
printk("%d pages swap cached\n", cached);
#ifndef CONFIG_NO_PGT_CACHE
printk("%ld page tables cached\n", pgtable_cache_size);
#endif
show_buffers();
}
@@ -58,7 +58,7 @@ static int __init minicache_init(void)
pmd = pmd_alloc(&init_mm, pgd, minicache_address);
if (!pmd)
BUG();
minicache_pte = pte_alloc(&init_mm, pmd, minicache_address);
minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
if (!minicache_pte)
BUG();
@@ -98,11 +98,15 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
if (!new_pmd)
goto no_pmd;
new_pte = pte_alloc(mm, new_pmd, 0);
new_pte = pte_alloc_map(mm, new_pmd, 0);
if (!new_pte)
goto no_pte;
init_pmd = pmd_offset(init_pgd, 0);
init_pte = pte_offset_map_nested(init_pmd, 0);
set_pte(new_pte, *init_pte);
pte_unmap_nested(init_pte);
pte_unmap(new_pte);
spin_unlock(&mm->page_table_lock);
}
@@ -138,7 +142,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
void free_pgd_slow(pgd_t *pgd)
{
pmd_t *pmd;
pte_t *pte;
struct page *pte;
if (!pgd)
return;
@@ -153,7 +157,7 @@ void free_pgd_slow(pgd_t *pgd)
goto free;
}
pte = pte_offset(pmd, 0);
pte = pmd_page(*pmd);
pmd_clear(pmd);
pte_free(pte);
pmd_free(pmd);
@@ -198,7 +202,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
}
ptep = pte_offset(pmdp, virt);
ptep = pte_offset_kernel(pmdp, virt);
set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
@@ -225,6 +229,20 @@ static void __init create_mapping(struct map_desc *md)
int prot_sect, prot_pte;
long off;
if (md->prot_read && md->prot_write &&
!md->cacheable && !md->bufferable) {
printk(KERN_WARNING "Security risk: creating user "
"accessible mapping for 0x%08lx at 0x%08lx\n",
md->physical, md->virtual);
}
if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
printk(KERN_WARNING "MM: not creating mapping for "
"0x%08lx at 0x%08lx in user region\n",
md->physical, md->virtual);
return;
}
prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
(md->prot_read ? L_PTE_USER : 0) |
(md->prot_write ? L_PTE_WRITE : 0) |
@@ -14,136 +14,20 @@
#include <asm/processor.h>
/*
* Get the cache handling stuff now.
*/
#include <asm/proc/cache.h>
/*
* ARM processors do not cache TLB tables in RAM.
*/
#define flush_tlb_pgtables(mm,start,end) do { } while (0)
/*
* Processor specific parts...
*/
#include <asm/proc/pgalloc.h>
/*
* Page table cache stuff
*/
#ifndef CONFIG_NO_PGT_CACHE
#ifdef CONFIG_SMP
#error Pgtable caches have to be per-CPU, so that no locking is needed.
#endif /* CONFIG_SMP */
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
} quicklists;
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
/* used for quicklists */
#define __pgd_next(pgd) (((unsigned long *)pgd)[1])
#define __pte_next(pte) (((unsigned long *)pte)[0])
static inline pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
preempt_disable();
if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)__pgd_next(ret);
ret[1] = ret[2];
clean_dcache_entry(ret + 1);
pgtable_cache_size--;
}
preempt_enable();
return (pgd_t *)ret;
}
static inline void free_pgd_fast(pgd_t *pgd)
{
preempt_disable();
__pgd_next(pgd) = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
preempt_enable();
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
unsigned long *ret;
preempt_disable();
if((ret = pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)__pte_next(ret);
ret[0] = 0;
clean_dcache_entry(ret);
pgtable_cache_size--;
}
preempt_enable();
return (pte_t *)ret;
}
static inline void free_pte_fast(pte_t *pte)
{
preempt_disable();
__pte_next(pte) = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
preempt_enable();
}
#else /* CONFIG_NO_PGT_CACHE */
#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist ((unsigned long *)0)
#define get_pgd_fast() ((pgd_t *)0)
#define pte_alloc_one_fast(mm,addr) ((pte_t *)0)
#define free_pgd_fast(pgd) free_pgd_slow(pgd)
#define free_pte_fast(pte) pte_free_slow(pte)
#endif /* CONFIG_NO_PGT_CACHE */
#define pte_free(pte) free_pte_fast(pte)
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one_fast(mm,addr) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free_slow(pmd) do { } while (0)
#define pmd_free_fast(pmd) do { } while (0)
#define pmd_free(pmd) do { } while (0)
#define pgd_populate(mm,pmd,pte) BUG()
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(pgd_t *pgd);
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pgd = get_pgd_fast();
if (!pgd)
pgd = get_pgd_slow(mm);
return pgd;
}
#define pgd_free(pgd) free_pgd_fast(pgd)
extern int do_check_pgt_cache(int, int);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(pgd) free_pgd_slow(pgd)
#endif
@@ -13,7 +13,6 @@
#include <linux/config.h>
#include <asm/arch/memory.h>
#include <asm/arch/vmalloc.h>
#include <asm/proc-fns.h>
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
@@ -146,8 +145,16 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define pmd_offset(dir, addr) ((pmd_t *)(dir))
/* Find an entry in the third-level page table.. */
#define __pte_offset(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, addr) ((pte_t *)pmd_page(*(dir)) + __pte_offset(addr))
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pmd_page(dir) ((struct page *)__pmd_page(dir))
#define __pte_offset(dir, addr) ((pte_t *)__pmd_page(*(dir)) + __pte_index(addr))
#define pte_offset_kernel __pte_offset
#define pte_offset_map __pte_offset
#define pte_offset_map_nested __pte_offset
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#include <asm/proc/pgtable.h>
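With these definitions, every third-level access becomes an explicit
map/unmap pair, even though both halves currently expand to nothing on
ARM. A hypothetical walker using the new macros (function name is ours;
pgd_none()/pmd_bad() checks omitted for brevity):

	static pte_t sketch_read_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pmd_t *pmd = pmd_offset(pgd, addr);
		pte_t *pte = pte_offset_map(pmd, addr);	/* replaces pte_offset() */
		pte_t entry = *pte;

		pte_unmap(pte);	/* a no-op here, but required by the 2.5.5 API */
		return entry;
	}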
@@ -18,7 +18,8 @@ extern kmem_cache_t *pte_cache;
* from the Linux copy. The processor copies are offset by -PTRS_PER_PTE
* words from the Linux copy.
*/
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte;
@@ -28,10 +29,21 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte;
pte = kmem_cache_alloc(pte_cache, GFP_KERNEL);
if (pte)
pte += PTRS_PER_PTE;
return (struct page *)pte;
}
/*
* Free one PTE table.
*/
static inline void pte_free_slow(pte_t *pte)
static inline void pte_free_kernel(pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
@@ -39,6 +51,15 @@ static inline void pte_free_slow(pte_t *pte)
}
}
static inline void pte_free(struct page *pte)
{
pte_t *_pte = (pte_t *)pte;
if (pte) {
_pte -= PTRS_PER_PTE;
kmem_cache_free(pte_cache, _pte);
}
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
@@ -46,12 +67,14 @@ static inline void pte_free_slow(pte_t *pte)
* If 'mm' is the init tasks mm, then we are doing a vmalloc, and we
* need to set stuff up correctly for it.
*/
#define pmd_populate_kernel(mm,pmdp,pte) \
do { \
BUG_ON(mm != &init_mm); \
set_pmd(pmdp, __mk_pmd(pte, _PAGE_KERNEL_TABLE));\
} while (0)
#define pmd_populate(mm,pmdp,pte) \
do { \
unsigned long __prot; \
if (mm == &init_mm) \
__prot = _PAGE_KERNEL_TABLE; \
else \
__prot = _PAGE_USER_TABLE; \
set_pmd(pmdp, __mk_pmd(pte, __prot)); \
BUG_ON(mm == &init_mm); \
set_pmd(pmdp, __mk_pmd(pte, _PAGE_USER_TABLE)); \
} while (0)
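This split follows the 2.5.5 convention: pmd_populate_kernel() is handed
a pte_t * and only ever services init_mm, while pmd_populate() is handed
the struct page * that pte_alloc_one() returns (which, as noted at the
top, is currently a pte_t * in disguise on ARM). A hypothetical pair of
call sites, with kptep and ptepage as illustrative names:

	pmd_populate_kernel(&init_mm, pmdp, kptep);	/* kernel mapping */
	pmd_populate(mm, pmdp, ptepage);		/* user mapping: ptepage is the
							 * struct page * from pte_alloc_one() */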
@@ -125,7 +125,7 @@ static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
return pmd;
}
static inline unsigned long pmd_page(pmd_t pmd)
static inline unsigned long __pmd_page(pmd_t pmd)
{
unsigned long ptr;