Commit d7181b4b authored by Russell King's avatar Russell King

[ARM] Inline PMD entry cache handling

The common case is building a kernel for one CPU type, and we are
able to allow GCC to optimise away any of the PMD entry cache
handling assembly which will never be used.
parent 0faa91fd
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/tty.h> #include <linux/tty.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/serial_core.h> #include <linux/serial_core.h>
#include <linux/delay.h> #include <linux/delay.h>
...@@ -25,6 +26,7 @@ ...@@ -25,6 +26,7 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h> #include <asm/mach/arch.h>
#include <asm/mach/map.h> #include <asm/mach/map.h>
......
...@@ -247,7 +247,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg ...@@ -247,7 +247,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
pmdval = __pa(ptep) | prot_l1; pmdval = __pa(ptep) | prot_l1;
pmdp[0] = __pmd(pmdval); pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
cpu_flush_pmd(pmdp); flush_pmd_entry(pmdp);
} }
ptep = pte_offset_kernel(pmdp, virt); ptep = pte_offset_kernel(pmdp, virt);
......
...@@ -364,23 +364,6 @@ ENTRY(cpu_arm1020_set_pgd) ...@@ -364,23 +364,6 @@ ENTRY(cpu_arm1020_set_pgd)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_arm1020_flush_pmd(pmdp)
*
* Clean a level 1 translation table entry out of any caches
* such that the MMU can load it correctly from RAM.
*
* pmdp: r0 = pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm1020_flush_pmd)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 4 @ drain WB (NOTE(review): drain before the clean looks redundant with the drain below — confirm against ARM1020 errata)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* /*
* cpu_arm1020_set_pte(ptep, pte) * cpu_arm1020_set_pte(ptep, pte)
* *
...@@ -511,7 +494,6 @@ arm1020_processor_functions: ...@@ -511,7 +494,6 @@ arm1020_processor_functions:
/* pgtable */ /* pgtable */
.word cpu_arm1020_set_pgd .word cpu_arm1020_set_pgd
.word cpu_arm1020_flush_pmd
.word cpu_arm1020_set_pte .word cpu_arm1020_set_pte
.size arm1020_processor_functions, . - arm1020_processor_functions .size arm1020_processor_functions, . - arm1020_processor_functions
......
...@@ -233,17 +233,6 @@ ENTRY(cpu_arm7_set_pgd) ...@@ -233,17 +233,6 @@ ENTRY(cpu_arm7_set_pgd)
mcr p15, 0, r1, c5, c0, 0 @ flush TLBs mcr p15, 0, r1, c5, c0, 0 @ flush TLBs
mov pc, lr mov pc, lr
/*
* Function: arm6_flush_pmd(pmdp) / arm7_flush_pmd(pmdp)
*
* Params : r0 = Address of PMD entry
*
* Purpose : Flush a PMD entry out of any WB cache.
* Deliberately a no-op here: the body is a plain return,
* so these cores perform no cache maintenance on
* page table updates.
*/
ENTRY(cpu_arm6_flush_pmd)
ENTRY(cpu_arm7_flush_pmd)
mov pc, lr @ nothing to clean or drain
/* /*
* Function: arm6_7_set_pte(pte_t *ptep, pte_t pte) * Function: arm6_7_set_pte(pte_t *ptep, pte_t pte)
* Params : r0 = Address to set * Params : r0 = Address to set
...@@ -346,7 +335,6 @@ ENTRY(arm6_processor_functions) ...@@ -346,7 +335,6 @@ ENTRY(arm6_processor_functions)
/* pgtable */ /* pgtable */
.word cpu_arm6_set_pgd .word cpu_arm6_set_pgd
.word cpu_arm6_flush_pmd
.word cpu_arm6_set_pte .word cpu_arm6_set_pte
.size arm6_processor_functions, . - arm6_processor_functions .size arm6_processor_functions, . - arm6_processor_functions
...@@ -380,7 +368,6 @@ ENTRY(arm7_processor_functions) ...@@ -380,7 +368,6 @@ ENTRY(arm7_processor_functions)
/* pgtable */ /* pgtable */
.word cpu_arm7_set_pgd .word cpu_arm7_set_pgd
.word cpu_arm7_flush_pmd
.word cpu_arm7_set_pte .word cpu_arm7_set_pte
.size arm7_processor_functions, . - arm7_processor_functions .size arm7_processor_functions, . - arm7_processor_functions
......
...@@ -114,16 +114,6 @@ ENTRY(cpu_arm720_set_pgd) ...@@ -114,16 +114,6 @@ ENTRY(cpu_arm720_set_pgd)
mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
mov pc, lr mov pc, lr
/*
* Function: arm720_flush_pmd(pmdp)
*
* Params : r0 = Address of PMD entry
*
* Purpose : Flush a PMD entry out of any WB cache.
* Deliberately a no-op: the body is a plain return, so
* this core performs no cache maintenance on page
* table updates.
*/
ENTRY(cpu_arm720_flush_pmd)
mov pc, lr @ nothing to clean or drain
/* /*
* Function: arm720_set_pte(pte_t *ptep, pte_t pte) * Function: arm720_set_pte(pte_t *ptep, pte_t pte)
* Params : r0 = Address to set * Params : r0 = Address to set
...@@ -216,7 +206,6 @@ ENTRY(arm720_processor_functions) ...@@ -216,7 +206,6 @@ ENTRY(arm720_processor_functions)
/* pgtable */ /* pgtable */
.word cpu_arm720_set_pgd .word cpu_arm720_set_pgd
.word cpu_arm720_flush_pmd
.word cpu_arm720_set_pte .word cpu_arm720_set_pte
.size arm720_processor_functions, . - arm720_processor_functions .size arm720_processor_functions, . - arm720_processor_functions
......
...@@ -367,20 +367,6 @@ ENTRY(cpu_arm920_set_pgd) ...@@ -367,20 +367,6 @@ ENTRY(cpu_arm920_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_arm920_flush_pmd(pmdp)
*
* Clean a level 1 translation table entry out of the D-cache and
* drain the write buffer so the MMU can load it correctly from RAM.
*
* pmdp: r0 = pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm920_flush_pmd)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* /*
* cpu_arm920_set_pte(ptep, pte) * cpu_arm920_set_pte(ptep, pte)
* *
...@@ -499,7 +485,6 @@ arm920_processor_functions: ...@@ -499,7 +485,6 @@ arm920_processor_functions:
/* pgtable */ /* pgtable */
.word cpu_arm920_set_pgd .word cpu_arm920_set_pgd
.word cpu_arm920_flush_pmd
.word cpu_arm920_set_pte .word cpu_arm920_set_pte
.size arm920_processor_functions, . - arm920_processor_functions .size arm920_processor_functions, . - arm920_processor_functions
......
...@@ -368,20 +368,6 @@ ENTRY(cpu_arm922_set_pgd) ...@@ -368,20 +368,6 @@ ENTRY(cpu_arm922_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_arm922_flush_pmd(pmdp)
*
* Clean a level 1 translation table entry out of the D-cache and
* drain the write buffer so the MMU can load it correctly from RAM.
*
* pmdp: r0 = pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm922_flush_pmd)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* /*
* cpu_arm922_set_pte(ptep, pte) * cpu_arm922_set_pte(ptep, pte)
* *
...@@ -498,7 +484,6 @@ arm922_processor_functions: ...@@ -498,7 +484,6 @@ arm922_processor_functions:
/* pgtable */ /* pgtable */
.word cpu_arm922_set_pgd .word cpu_arm922_set_pgd
.word cpu_arm922_flush_pmd
.word cpu_arm922_set_pte .word cpu_arm922_set_pte
.size arm922_processor_functions, . - arm922_processor_functions .size arm922_processor_functions, . - arm922_processor_functions
......
...@@ -349,22 +349,6 @@ ENTRY(cpu_arm926_set_pgd) ...@@ -349,22 +349,6 @@ ENTRY(cpu_arm926_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_arm926_flush_pmd(pmdp)
*
* Clean a level 1 translation table entry out of the D-cache and
* drain the write buffer so the MMU can load it correctly from RAM.
* The clean is skipped when the D-cache is configured write-through,
* in which case RAM is already up to date.
*
* pmdp: r0 = pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm926_flush_pmd)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* /*
* cpu_arm926_set_pte(ptep, pte) * cpu_arm926_set_pte(ptep, pte)
* *
...@@ -496,7 +480,6 @@ arm926_processor_functions: ...@@ -496,7 +480,6 @@ arm926_processor_functions:
/* pgtable */ /* pgtable */
.word cpu_arm926_set_pgd .word cpu_arm926_set_pgd
.word cpu_arm926_flush_pmd
.word cpu_arm926_set_pte .word cpu_arm926_set_pte
.size arm926_processor_functions, . - arm926_processor_functions .size arm926_processor_functions, . - arm926_processor_functions
......
...@@ -424,21 +424,6 @@ ENTRY(cpu_sa1100_set_pgd) ...@@ -424,21 +424,6 @@ ENTRY(cpu_sa1100_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_sa110_flush_pmd(pmdp) / cpu_sa1100_flush_pmd(pmdp)
*
* Clean a level 1 translation table entry out of the D-cache and
* drain the write buffer so the MMU can load it correctly from RAM.
* SA110 and SA1100 share the same implementation (two entry labels).
*
* pmdp: r0 = pointer to PMD entry
*/
.align 5
ENTRY(cpu_sa110_flush_pmd)
ENTRY(cpu_sa1100_flush_pmd)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* /*
* cpu_sa110_set_pte(ptep, pte) * cpu_sa110_set_pte(ptep, pte)
* *
...@@ -540,7 +525,6 @@ ENTRY(sa110_processor_functions) ...@@ -540,7 +525,6 @@ ENTRY(sa110_processor_functions)
/* pgtable */ /* pgtable */
.word cpu_sa110_set_pgd .word cpu_sa110_set_pgd
.word cpu_sa110_flush_pmd
.word cpu_sa110_set_pte .word cpu_sa110_set_pte
.size sa110_processor_functions, . - sa110_processor_functions .size sa110_processor_functions, . - sa110_processor_functions
...@@ -573,7 +557,6 @@ ENTRY(sa1100_processor_functions) ...@@ -573,7 +557,6 @@ ENTRY(sa1100_processor_functions)
/* pgtable */ /* pgtable */
.word cpu_sa1100_set_pgd .word cpu_sa1100_set_pgd
.word cpu_sa1100_flush_pmd
.word cpu_sa1100_set_pte .word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions .size sa1100_processor_functions, . - sa1100_processor_functions
......
...@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range); ...@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_range); EXPORT_SYMBOL(cpu_icache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_page); EXPORT_SYMBOL(cpu_icache_invalidate_page);
EXPORT_SYMBOL(cpu_set_pgd); EXPORT_SYMBOL(cpu_set_pgd);
EXPORT_SYMBOL(cpu_flush_pmd);
EXPORT_SYMBOL(cpu_set_pte); EXPORT_SYMBOL(cpu_set_pte);
#else #else
EXPORT_SYMBOL(processor); EXPORT_SYMBOL(processor);
......
...@@ -572,21 +572,6 @@ ENTRY(cpu_xscale_set_pgd) ...@@ -572,21 +572,6 @@ ENTRY(cpu_xscale_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
cpwait_ret lr, ip cpwait_ret lr, ip
/*
* cpu_xscale_flush_pmd(pmdp)
*
* Clean a level 1 translation table entry out of the D-cache and
* drain the write (and fill) buffer so the MMU can load it
* correctly from RAM.
*
* pmdp: r0 = pointer to PMD entry
*/
.align 5
ENTRY(cpu_xscale_flush_pmd)
mov ip, #0 @ the drain op takes SBZ (zero) as operand
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
/* /*
* cpu_xscale_set_pte(ptep, pte) * cpu_xscale_set_pte(ptep, pte)
* *
...@@ -719,7 +704,6 @@ ENTRY(xscale_processor_functions) ...@@ -719,7 +704,6 @@ ENTRY(xscale_processor_functions)
/* pgtable */ /* pgtable */
.word cpu_xscale_set_pgd .word cpu_xscale_set_pgd
.word cpu_xscale_flush_pmd
.word cpu_xscale_set_pte .word cpu_xscale_set_pte
.size xscale_processor_functions, . - xscale_processor_functions .size xscale_processor_functions, . - xscale_processor_functions
......
...@@ -93,10 +93,6 @@ extern struct processor { ...@@ -93,10 +93,6 @@ extern struct processor {
* Set the page table * Set the page table
*/ */
void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm); void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a PMD (handling IMP bit 4)
*/
void (*flush_pmd)(pmd_t *pmdp);
/* /*
* Set a PTE * Set a PTE
*/ */
...@@ -126,7 +122,6 @@ extern const struct processor sa110_processor_functions; ...@@ -126,7 +122,6 @@ extern const struct processor sa110_processor_functions;
#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp) #define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm) #define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm)
#define cpu_flush_pmd(pmdp) processor.pgtable.flush_pmd(pmdp)
#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte) #define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm) #define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm)
......
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range) #define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page) #define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd) #define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
#define cpu_flush_pmd __cpu_fn(CPU_NAME,_flush_pmd)
#define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -65,7 +64,6 @@ extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end); ...@@ -65,7 +64,6 @@ extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_icache_invalidate_page(void *virt_page); extern void cpu_icache_invalidate_page(void *virt_page);
extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_flush_pmd(pmd_t *pmdp);
extern void cpu_set_pte(pte_t *ptep, pte_t pte); extern void cpu_set_pte(pte_t *ptep, pte_t pte);
extern volatile void cpu_reset(unsigned long addr); extern volatile void cpu_reset(unsigned long addr);
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* Page table allocation/freeing primitives for 32-bit ARM processors. * Page table allocation/freeing primitives for 32-bit ARM processors.
*/ */
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "pgtable.h" #include "pgtable.h"
/* /*
...@@ -92,7 +93,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) ...@@ -92,7 +93,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
pmdp[0] = __pmd(pmdval); pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
cpu_flush_pmd(pmdp); flush_pmd_entry(pmdp);
} }
static inline void static inline void
...@@ -105,5 +106,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) ...@@ -105,5 +106,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
pmdp[0] = __pmd(pmdval); pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
cpu_flush_pmd(pmdp); flush_pmd_entry(pmdp);
} }
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ #define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_UNCACHED (0) #define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE) #define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) #define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) #define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
...@@ -120,14 +121,19 @@ ...@@ -120,14 +121,19 @@
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#define pmd_bad(pmd) (pmd_val(pmd) & 2) #define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) do { *pmdp = pmd; cpu_flush_pmd(pmdp); } while (0)
static inline void pmd_clear(pmd_t *pmdp) #define set_pmd(pmdp,pmd) \
{ do { \
pmdp[0] = __pmd(0); *pmdp = pmd; \
pmdp[1] = __pmd(0); flush_pmd_entry(pmdp); \
cpu_flush_pmd(pmdp); } while (0)
}
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd) static inline pte_t *pmd_page_kernel(pmd_t pmd)
{ {
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#define TLB_V4_D_FULL (1 << 10) #define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11) #define TLB_V4_I_FULL (1 << 11)
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31) #define TLB_WB (1 << 31)
/* /*
...@@ -65,7 +66,7 @@ ...@@ -65,7 +66,7 @@
# define v4_always_flags (-1UL) # define v4_always_flags (-1UL)
#endif #endif
#define v4wbi_tlb_flags (TLB_WB | \ #define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \ TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE) TLB_V4_I_PAGE | TLB_V4_D_PAGE)
...@@ -84,7 +85,7 @@ ...@@ -84,7 +85,7 @@
# define v4wbi_always_flags (-1UL) # define v4wbi_always_flags (-1UL)
#endif #endif
#define v4wb_tlb_flags (TLB_WB | \ #define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \ TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE) TLB_V4_D_PAGE)
...@@ -287,6 +288,41 @@ static inline void flush_tlb_kernel_page(unsigned long kaddr) ...@@ -287,6 +288,41 @@ static inline void flush_tlb_kernel_page(unsigned long kaddr)
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
} }
/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
* RAM if the TLB for the CPU we are running on requires this.
* This is typically used when we are creating PMD entries.
*
* clean_pmd_entry
*
* Clean (but don't drain the write buffer) if the CPU requires
* these operations. This is typically used when we are removing
* PMD entries.
*/
/* Flush a PMD entry to RAM; each step compiles away entirely when the
* selected CPU's TLB flags say it is not needed. */
static inline void flush_pmd_entry(pmd_t *pmd)
{
const unsigned int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
/* Clean the D-cache line holding the PMD entry. */
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
/* Drain the write buffer so the entry actually reaches RAM. */
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
: : "r" (zero));
}
/* Clean (but do not drain) the D-cache line holding a PMD entry;
* compiles away when TLB_DCLEAN is not set for the selected CPU.
* NOTE(review): the asm comment string says "flush_pmd" although this
* is the clean-only variant — harmless, but confirm before renaming. */
static inline void clean_pmd_entry(pmd_t *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
}
#undef tlb_flag #undef tlb_flag
#undef always_tlb_flags #undef always_tlb_flags
#undef possible_tlb_flags #undef possible_tlb_flags
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment