Commit b4d20eff authored by Linus Walleij, committed by Russell King (Oracle)

ARM: 9387/2: mm: Rewrite cacheflush vtables in CFI safe C

Instead of defining all cache flush operations with an assembly
macro in proc-macros.S, provide an explicit struct cpu_cache_fns
for each CPU cache type in mm/cache.c.

As a side effect from rewriting the vtables in C, we can
avoid the aliasing for the "louis" cache callback, instead we
can just assign the NN_flush_kern_cache_all() function to the
louis callback in the C vtable.

As the louis cache callback is called explicitly (not through the
vtable) if we only have one type of cache support compiled in, we
need an ifdef quirk for this in the !MULTI_CACHE case.

Feroceon and XScale have some DMA mapping quirks; in these cases we
can just define two structs and assign all but one callback to the
main implementation. Since each of them invoked define_cache_functions
twice, they require MULTI_CACHE by definition, so the compiled-in
shortcut is not used on these variants.
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
parent 2074beeb
...@@ -118,6 +118,10 @@ ...@@ -118,6 +118,10 @@
# define MULTI_CACHE 1 # define MULTI_CACHE 1
#endif #endif
#ifdef CONFIG_CPU_CACHE_NOP
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V7M) #if defined(CONFIG_CPU_V7M)
# define MULTI_CACHE 1 # define MULTI_CACHE 1
#endif #endif
...@@ -126,29 +130,15 @@ ...@@ -126,29 +130,15 @@
#error Unknown cache maintenance model #error Unknown cache maintenance model
#endif #endif
#ifndef __ASSEMBLER__
static inline void nop_flush_icache_all(void) { }
static inline void nop_flush_kern_cache_all(void) { }
static inline void nop_flush_kern_cache_louis(void) { }
static inline void nop_flush_user_cache_all(void) { }
static inline void nop_flush_user_cache_range(unsigned long a,
unsigned long b, unsigned int c) { }
static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
static inline int nop_coherent_user_range(unsigned long a,
unsigned long b) { return 0; }
static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
static inline void nop_dma_flush_range(const void *a, const void *b) { }
static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#endif
#ifndef MULTI_CACHE #ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
/* This function only has a dedicated assembly callback on the v7 cache */
#ifdef CONFIG_CPU_CACHE_V7
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis) #define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
#else
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all)
#endif
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range) #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
......
...@@ -45,6 +45,7 @@ obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o ...@@ -45,6 +45,7 @@ obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o
obj-y += cache.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
* Copyright (C) 2015-2016 Broadcom * Copyright (C) 2015-2016 Broadcom
*/ */
#include <linux/cfi_types.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/io.h> #include <linux/io.h>
......
...@@ -243,11 +243,3 @@ SYM_FUNC_END(fa_dma_map_area) ...@@ -243,11 +243,3 @@ SYM_FUNC_END(fa_dma_map_area)
SYM_TYPED_FUNC_START(fa_dma_unmap_area) SYM_TYPED_FUNC_START(fa_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(fa_dma_unmap_area) SYM_FUNC_END(fa_dma_unmap_area)
.globl fa_flush_kern_cache_louis
.equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions fa
...@@ -18,9 +18,6 @@ SYM_TYPED_FUNC_START(nop_flush_kern_cache_all) ...@@ -18,9 +18,6 @@ SYM_TYPED_FUNC_START(nop_flush_kern_cache_all)
ret lr ret lr
SYM_FUNC_END(nop_flush_kern_cache_all) SYM_FUNC_END(nop_flush_kern_cache_all)
.globl nop_flush_kern_cache_louis
.equ nop_flush_kern_cache_louis, nop_flush_icache_all
SYM_TYPED_FUNC_START(nop_flush_user_cache_all) SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
ret lr ret lr
SYM_FUNC_END(nop_flush_user_cache_all) SYM_FUNC_END(nop_flush_user_cache_all)
...@@ -50,11 +47,6 @@ SYM_TYPED_FUNC_START(nop_dma_map_area) ...@@ -50,11 +47,6 @@ SYM_TYPED_FUNC_START(nop_dma_map_area)
ret lr ret lr
SYM_FUNC_END(nop_dma_map_area) SYM_FUNC_END(nop_dma_map_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions nop
SYM_TYPED_FUNC_START(nop_dma_unmap_area) SYM_TYPED_FUNC_START(nop_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(nop_dma_unmap_area) SYM_FUNC_END(nop_dma_unmap_area)
...@@ -144,11 +144,3 @@ SYM_FUNC_END(v4_dma_unmap_area) ...@@ -144,11 +144,3 @@ SYM_FUNC_END(v4_dma_unmap_area)
SYM_TYPED_FUNC_START(v4_dma_map_area) SYM_TYPED_FUNC_START(v4_dma_map_area)
ret lr ret lr
SYM_FUNC_END(v4_dma_map_area) SYM_FUNC_END(v4_dma_map_area)
.globl v4_flush_kern_cache_louis
.equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v4
...@@ -253,11 +253,3 @@ SYM_FUNC_END(v4wb_dma_map_area) ...@@ -253,11 +253,3 @@ SYM_FUNC_END(v4wb_dma_map_area)
SYM_TYPED_FUNC_START(v4wb_dma_unmap_area) SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(v4wb_dma_unmap_area) SYM_FUNC_END(v4wb_dma_unmap_area)
.globl v4wb_flush_kern_cache_louis
.equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v4wb
...@@ -200,11 +200,3 @@ SYM_FUNC_END(v4wt_dma_unmap_area) ...@@ -200,11 +200,3 @@ SYM_FUNC_END(v4wt_dma_unmap_area)
SYM_TYPED_FUNC_START(v4wt_dma_map_area) SYM_TYPED_FUNC_START(v4wt_dma_map_area)
ret lr ret lr
SYM_FUNC_END(v4wt_dma_map_area) SYM_FUNC_END(v4wt_dma_map_area)
.globl v4wt_flush_kern_cache_louis
.equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v4wt
...@@ -298,11 +298,3 @@ SYM_TYPED_FUNC_START(v6_dma_unmap_area) ...@@ -298,11 +298,3 @@ SYM_TYPED_FUNC_START(v6_dma_unmap_area)
bne v6_dma_inv_range bne v6_dma_inv_range
ret lr ret lr
SYM_FUNC_END(v6_dma_unmap_area) SYM_FUNC_END(v6_dma_unmap_area)
.globl v6_flush_kern_cache_louis
.equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v6
...@@ -456,28 +456,3 @@ SYM_TYPED_FUNC_START(v7_dma_unmap_area) ...@@ -456,28 +456,3 @@ SYM_TYPED_FUNC_START(v7_dma_unmap_area)
bne v7_dma_inv_range bne v7_dma_inv_range
ret lr ret lr
SYM_FUNC_END(v7_dma_unmap_area) SYM_FUNC_END(v7_dma_unmap_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7
/* The Broadcom Brahma-B15 read-ahead cache requires some modifications
* to the v7_cache_fns, we only override the ones we need
*/
#ifndef CONFIG_CACHE_B15_RAC
globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all
#endif
globl_equ b15_flush_icache_all, v7_flush_icache_all
globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis
globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all
globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range
globl_equ b15_coherent_kern_range, v7_coherent_kern_range
globl_equ b15_coherent_user_range, v7_coherent_user_range
globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area
globl_equ b15_dma_map_area, v7_dma_map_area
globl_equ b15_dma_unmap_area, v7_dma_unmap_area
globl_equ b15_dma_flush_range, v7_dma_flush_range
define_cache_functions b15
...@@ -447,11 +447,3 @@ SYM_TYPED_FUNC_START(v7m_dma_unmap_area) ...@@ -447,11 +447,3 @@ SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
bne v7m_dma_inv_range bne v7m_dma_inv_range
ret lr ret lr
SYM_FUNC_END(v7m_dma_unmap_area) SYM_FUNC_END(v7m_dma_unmap_area)
.globl v7m_flush_kern_cache_louis
.equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7m
This diff is collapsed.
...@@ -359,12 +359,6 @@ SYM_TYPED_FUNC_START(arm1020_dma_unmap_area) ...@@ -359,12 +359,6 @@ SYM_TYPED_FUNC_START(arm1020_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm1020_dma_unmap_area) SYM_FUNC_END(arm1020_dma_unmap_area)
.globl arm1020_flush_kern_cache_louis
.equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020
.align 5 .align 5
ENTRY(cpu_arm1020_dcache_clean_area) ENTRY(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
......
...@@ -346,12 +346,6 @@ SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area) ...@@ -346,12 +346,6 @@ SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm1020e_dma_unmap_area) SYM_FUNC_END(arm1020e_dma_unmap_area)
.globl arm1020e_flush_kern_cache_louis
.equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1020e
.align 5 .align 5
ENTRY(cpu_arm1020e_dcache_clean_area) ENTRY(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
......
...@@ -345,12 +345,6 @@ SYM_TYPED_FUNC_START(arm1022_dma_unmap_area) ...@@ -345,12 +345,6 @@ SYM_TYPED_FUNC_START(arm1022_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm1022_dma_unmap_area) SYM_FUNC_END(arm1022_dma_unmap_area)
.globl arm1022_flush_kern_cache_louis
.equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1022
.align 5 .align 5
ENTRY(cpu_arm1022_dcache_clean_area) ENTRY(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
......
...@@ -340,12 +340,6 @@ SYM_TYPED_FUNC_START(arm1026_dma_unmap_area) ...@@ -340,12 +340,6 @@ SYM_TYPED_FUNC_START(arm1026_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm1026_dma_unmap_area) SYM_FUNC_END(arm1026_dma_unmap_area)
.globl arm1026_flush_kern_cache_louis
.equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm1026
.align 5 .align 5
ENTRY(cpu_arm1026_dcache_clean_area) ENTRY(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
......
...@@ -311,11 +311,6 @@ SYM_TYPED_FUNC_START(arm920_dma_unmap_area) ...@@ -311,11 +311,6 @@ SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm920_dma_unmap_area) SYM_FUNC_END(arm920_dma_unmap_area)
.globl arm920_flush_kern_cache_louis
.equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm920
#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
......
...@@ -313,12 +313,6 @@ SYM_TYPED_FUNC_START(arm922_dma_unmap_area) ...@@ -313,12 +313,6 @@ SYM_TYPED_FUNC_START(arm922_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm922_dma_unmap_area) SYM_FUNC_END(arm922_dma_unmap_area)
.globl arm922_flush_kern_cache_louis
.equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm922
#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
ENTRY(cpu_arm922_dcache_clean_area) ENTRY(cpu_arm922_dcache_clean_area)
......
...@@ -368,12 +368,6 @@ SYM_TYPED_FUNC_START(arm925_dma_unmap_area) ...@@ -368,12 +368,6 @@ SYM_TYPED_FUNC_START(arm925_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm925_dma_unmap_area) SYM_FUNC_END(arm925_dma_unmap_area)
.globl arm925_flush_kern_cache_louis
.equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm925
ENTRY(cpu_arm925_dcache_clean_area) ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
......
...@@ -331,12 +331,6 @@ SYM_TYPED_FUNC_START(arm926_dma_unmap_area) ...@@ -331,12 +331,6 @@ SYM_TYPED_FUNC_START(arm926_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm926_dma_unmap_area) SYM_FUNC_END(arm926_dma_unmap_area)
.globl arm926_flush_kern_cache_louis
.equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm926
ENTRY(cpu_arm926_dcache_clean_area) ENTRY(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
......
...@@ -269,12 +269,6 @@ SYM_TYPED_FUNC_START(arm940_dma_unmap_area) ...@@ -269,12 +269,6 @@ SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm940_dma_unmap_area) SYM_FUNC_END(arm940_dma_unmap_area)
.globl arm940_flush_kern_cache_louis
.equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm940
.type __arm940_setup, #function .type __arm940_setup, #function
__arm940_setup: __arm940_setup:
mov r0, #0 mov r0, #0
......
...@@ -312,12 +312,6 @@ SYM_TYPED_FUNC_START(arm946_dma_unmap_area) ...@@ -312,12 +312,6 @@ SYM_TYPED_FUNC_START(arm946_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(arm946_dma_unmap_area) SYM_FUNC_END(arm946_dma_unmap_area)
.globl arm946_flush_kern_cache_louis
.equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions arm946
ENTRY(cpu_arm946_dcache_clean_area) ENTRY(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
......
...@@ -414,33 +414,6 @@ SYM_TYPED_FUNC_START(feroceon_dma_unmap_area) ...@@ -414,33 +414,6 @@ SYM_TYPED_FUNC_START(feroceon_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(feroceon_dma_unmap_area) SYM_FUNC_END(feroceon_dma_unmap_area)
.globl feroceon_flush_kern_cache_louis
.equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions feroceon
.macro range_alias basename
.globl feroceon_range_\basename
.type feroceon_range_\basename , %function
.equ feroceon_range_\basename , feroceon_\basename
.endm
/*
* Most of the cache functions are unchanged for this case.
* Export suitable alias symbols for the unchanged functions:
*/
range_alias flush_icache_all
range_alias flush_user_cache_all
range_alias flush_kern_cache_all
range_alias flush_kern_cache_louis
range_alias flush_user_cache_range
range_alias coherent_kern_range
range_alias coherent_user_range
range_alias dma_unmap_area
define_cache_functions feroceon_range
.align 5 .align 5
ENTRY(cpu_feroceon_dcache_clean_area) ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \ #if defined(CONFIG_CACHE_FEROCEON_L2) && \
......
...@@ -320,24 +320,6 @@ ENTRY(\name\()_processor_functions) ...@@ -320,24 +320,6 @@ ENTRY(\name\()_processor_functions)
#endif #endif
.endm .endm
.macro define_cache_functions name:req
.align 2
.type \name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
.long \name\()_flush_icache_all
.long \name\()_flush_kern_cache_all
.long \name\()_flush_kern_cache_louis
.long \name\()_flush_user_cache_all
.long \name\()_flush_user_cache_range
.long \name\()_coherent_kern_range
.long \name\()_coherent_user_range
.long \name\()_flush_kern_dcache_area
.long \name\()_dma_map_area
.long \name\()_dma_unmap_area
.long \name\()_dma_flush_range
.size \name\()_cache_fns, . - \name\()_cache_fns
.endm
.macro globl_equ x, y .macro globl_equ x, y
.globl \x .globl \x
.equ \x, \y .equ \x, \y
......
...@@ -296,12 +296,6 @@ SYM_TYPED_FUNC_START(mohawk_dma_unmap_area) ...@@ -296,12 +296,6 @@ SYM_TYPED_FUNC_START(mohawk_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(mohawk_dma_unmap_area) SYM_FUNC_END(mohawk_dma_unmap_area)
.globl mohawk_flush_kern_cache_louis
.equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions mohawk
ENTRY(cpu_mohawk_dcache_clean_area) ENTRY(cpu_mohawk_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE add r0, r0, #CACHE_DLINESIZE
......
...@@ -341,12 +341,6 @@ SYM_TYPED_FUNC_START(xsc3_dma_unmap_area) ...@@ -341,12 +341,6 @@ SYM_TYPED_FUNC_START(xsc3_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(xsc3_dma_unmap_area) SYM_FUNC_END(xsc3_dma_unmap_area)
.globl xsc3_flush_kern_cache_louis
.equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xsc3
ENTRY(cpu_xsc3_dcache_clean_area) ENTRY(cpu_xsc3_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
......
...@@ -391,6 +391,20 @@ SYM_TYPED_FUNC_START(xscale_dma_map_area) ...@@ -391,6 +391,20 @@ SYM_TYPED_FUNC_START(xscale_dma_map_area)
b xscale_dma_flush_range b xscale_dma_flush_range
SYM_FUNC_END(xscale_dma_map_area) SYM_FUNC_END(xscale_dma_map_area)
/*
* On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
* clear the dirty bits, which means that if we invalidate a dirty line,
* the dirty data can still be written back to external memory later on.
*
* The recommended workaround is to always do a clean D-cache line before
* doing an invalidate D-cache line, so on the affected processors,
* dma_inv_range() is implemented as dma_flush_range().
*
* See erratum #25 of "Intel 80200 Processor Specification Update",
* revision January 22, 2003, available at:
* http://www.intel.com/design/iio/specupdt/273415.htm
*/
/* /*
* dma_map_area(start, size, dir) * dma_map_area(start, size, dir)
* - start - kernel virtual start address * - start - kernel virtual start address
...@@ -414,49 +428,6 @@ SYM_TYPED_FUNC_START(xscale_dma_unmap_area) ...@@ -414,49 +428,6 @@ SYM_TYPED_FUNC_START(xscale_dma_unmap_area)
ret lr ret lr
SYM_FUNC_END(xscale_dma_unmap_area) SYM_FUNC_END(xscale_dma_unmap_area)
.globl xscale_flush_kern_cache_louis
.equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xscale
/*
* On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
* clear the dirty bits, which means that if we invalidate a dirty line,
* the dirty data can still be written back to external memory later on.
*
* The recommended workaround is to always do a clean D-cache line before
* doing an invalidate D-cache line, so on the affected processors,
* dma_inv_range() is implemented as dma_flush_range().
*
* See erratum #25 of "Intel 80200 Processor Specification Update",
* revision January 22, 2003, available at:
* http://www.intel.com/design/iio/specupdt/273415.htm
*/
.macro a0_alias basename
.globl xscale_80200_A0_A1_\basename
.type xscale_80200_A0_A1_\basename , %function
.equ xscale_80200_A0_A1_\basename , xscale_\basename
.endm
/*
* Most of the cache functions are unchanged for these processor revisions.
* Export suitable alias symbols for the unchanged functions:
*/
a0_alias flush_icache_all
a0_alias flush_user_cache_all
a0_alias flush_kern_cache_all
a0_alias flush_kern_cache_louis
a0_alias flush_user_cache_range
a0_alias coherent_kern_range
a0_alias coherent_user_range
a0_alias flush_kern_dcache_area
a0_alias dma_flush_range
a0_alias dma_unmap_area
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions xscale_80200_A0_A1
ENTRY(cpu_xscale_dcache_clean_area) ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment