Commit f412b09f authored by Russell King, committed by Russell King

Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6 into devel

parents 31bccbf3 7f1fd31d
@@ -717,6 +717,9 @@ __armv7_mmu_cache_off:
 		bl	__armv7_mmu_cache_flush
 		mov	r0, #0
 		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
+		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
+		mcr	p15, 0, r0, c7, c10, 4	@ DSB
+		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mov	pc, r12
 
 __arm6_mmu_cache_off:
@@ -778,12 +781,13 @@ __armv6_mmu_cache_flush:
 __armv7_mmu_cache_flush:
 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
 		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
-		beq	hierarchical
 		mov	r10, #0
+		beq	hierarchical
 		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
 		b	iflush
 hierarchical:
-		stmfd	sp!, {r0-r5, r7, r9-r11}
+		mcr	p15, 0, r10, c7, c10, 5	@ DMB
+		stmfd	sp!, {r0-r5, r7, r9, r11}
 		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
 		ands	r3, r0, #0x7000000	@ extract loc from clidr
 		mov	r3, r3, lsr #23		@ left align loc bit field
@@ -820,12 +824,14 @@ skip:
 		cmp	r3, r10
 		bgt	loop1
 finished:
+		ldmfd	sp!, {r0-r5, r7, r9, r11}
 		mov	r10, #0			@ switch back to cache level 0
 		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
-		ldmfd	sp!, {r0-r5, r7, r9-r11}
 iflush:
+		mcr	p15, 0, r10, c7, c10, 4	@ DSB
 		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
-		mcr	p15, 0, r10, c7, c10, 4	@ drain WB
+		mcr	p15, 0, r10, c7, c10, 4	@ DSB
+		mcr	p15, 0, r10, c7, c5, 4	@ ISB
 		mov	pc, lr
 
 __armv5tej_mmu_cache_flush:
...
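The added instructions follow the ARMv7 requirement that cache and branch-predictor maintenance be completed with a DSB and made visible to instruction fetch with an ISB before execution relies on the new state. A minimal sketch of the same sequence as GCC inline assembly (illustrative only, not part of this commit; the CP15 barrier encodings are the pre-ARMv7-compatible forms used in the hunk above):

```c
/*
 * Illustrative only: the maintenance-plus-barrier sequence the patch
 * adds, expressed as GCC inline assembly for an ARMv7 CPU.
 */
static inline void v7_invalidate_btc_and_sync(void)
{
	unsigned long zero = 0;

	asm volatile(
		"mcr	p15, 0, %0, c7, c5, 6\n\t"	/* invalidate branch target cache */
		"mcr	p15, 0, %0, c7, c10, 4\n\t"	/* DSB: wait for maintenance to complete */
		"mcr	p15, 0, %0, c7, c5, 4\n\t"	/* ISB: flush the pipeline */
		: : "r" (zero) : "memory");
}
```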
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
+#include <asm/cachetype.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -295,16 +296,6 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #endif
 
-/*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages.  Since the
- * direct-mappings of these pages may contain cached data, we need
- * to do a full cache flush to ensure that writebacks don't corrupt
- * data placed into these pages via the new mappings.
- */
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * process's address space.  Really, we want to allow our "user
@@ -444,4 +435,29 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
 	dmac_inv_range(start, start + size);
 }
 
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+	else
+		/*
+		 * set_pte_at() called from vmap_pte_range() does not
+		 * have a DSB after cleaning the cache line.
+		 */
+		dsb();
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	if (!cache_is_vipt_nonaliasing())
+		flush_cache_all();
+}
+
 #endif
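The reason the full flush is still needed on aliasing caches: with a virtually indexed cache, the direct mapping and a new vmap/vmalloc alias of the same physical page can fall into different cache colours, so writes through one mapping may sit in lines the other mapping never indexes. A small standalone illustration of the CACHE_COLOUR() arithmetic (the addresses, 4KB pages, and 16KB SHMLBA are assumptions for the example):

```c
#include <stdio.h>

/* Stand-ins for the example (assumption: 4KB pages, 16KB SHMLBA). */
#define SHMLBA		(4 * 4096)
#define PAGE_SHIFT	12
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	/* Two hypothetical virtual addresses mapping one physical page. */
	unsigned long direct = 0xc0001000UL, alias = 0xe0002000UL;

	/* Different colours index different cache sets, so the alias
	 * cannot hit lines filled through the direct mapping. */
	printf("colour(direct)=%lu colour(alias)=%lu\n",
	       CACHE_COLOUR(direct), CACHE_COLOUR(alias));	/* prints 1 and 2 */
	return 0;
}
```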
@@ -16,6 +16,7 @@
 #define HWCAP_IWMMXT	512
 #define HWCAP_CRUNCH	1024
 #define HWCAP_THUMBEE	2048
+#define HWCAP_NEON	4096
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 /*
...
@@ -772,6 +772,8 @@ static const char *hwcap_str[] = {
 	"java",
 	"iwmmxt",
 	"crunch",
+	"thumbee",
+	"neon",
 	NULL
 };
...
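Userspace normally consumes these strings via the Features line of /proc/cpuinfo, or more robustly via the ELF auxiliary vector. A hedged example of the latter (getauxval() requires glibc 2.16 or later; the HWCAP_NEON value mirrors the kernel define added above):

```c
#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP (glibc >= 2.16) */

#define HWCAP_NEON	4096	/* mirrors the kernel define added above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* The kernel sets HWCAP_NEON in the aux vector when vfp_init()
	 * detects Advanced SIMD support (see the MVFR1 check below). */
	printf("NEON %s\n", (hwcap & HWCAP_NEON) ? "present" : "absent");
	return 0;
}
```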
@@ -25,7 +25,7 @@
 /*
  * Access to the ThumbEE Handler Base register
  */
-static inline unsigned long teehbr_read()
+static inline unsigned long teehbr_read(void)
 {
 	unsigned long v;
 	asm("mrc p14, 6, %0, c1, c0, 0\n" : "=r" (v));
...
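The `()` to `(void)` change is more than style: in C, an empty parameter list declares a function with unspecified parameters, so calls with bogus arguments compile silently, while `(void)` gives a real zero-argument prototype. A standalone sketch of the difference (not kernel code):

```c
/* Standalone sketch of why "(void)" matters in C. */
static unsigned long old_style()	/* unspecified parameters: calls go unchecked */
{
	return 0;
}

static unsigned long new_style(void)	/* real prototype: exactly zero arguments */
{
	return 0;
}

int main(void)
{
	old_style(42);		/* accepted: there is no prototype to check against */
	/* new_style(42); */	/* would be rejected at compile time */
	return (int)new_style();
}
```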
@@ -26,6 +26,7 @@
 *	- mm	- mm_struct describing address space
 */
 ENTRY(v7_flush_dcache_all)
+		dmb				@ ensure ordering with previous memory accesses
 		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
 		ands	r3, r0, #0x7000000	@ extract loc from clidr
 		mov	r3, r3, lsr #23		@ left align loc bit field
@@ -64,6 +65,7 @@ skip:
 finished:
 		mov	r10, #0			@ switch back to cache level 0
 		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
+		dsb
 		isb
 		mov	pc, lr
 ENDPROC(v7_flush_dcache_all)
...
@@ -71,6 +71,8 @@ ENTRY(cpu_v6_reset)
 *	IRQs are already disabled.
 */
 ENTRY(cpu_v6_do_idle)
+	mov	r1, #0
+	mcr	p15, 0, r1, c7, c10, 4		@ DWB - WFI may enter a low-power mode
 	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt
 	mov	pc, lr
...
@@ -20,9 +20,17 @@
 #define TTB_C		(1 << 0)
 #define TTB_S		(1 << 1)
+#define TTB_RGN_NC	(0 << 3)
+#define TTB_RGN_OC_WBWA	(1 << 3)
 #define TTB_RGN_OC_WT	(2 << 3)
 #define TTB_RGN_OC_WB	(3 << 3)
 
+#ifndef CONFIG_SMP
+#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
+#else
+#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
+#endif
+
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
 ENDPROC(cpu_v7_proc_init)
@@ -55,6 +63,7 @@ ENDPROC(cpu_v7_reset)
 *	IRQs are already disabled.
 */
 ENTRY(cpu_v7_do_idle)
+	dsb					@ WFI may enter a low-power mode
 	wfi
 	mov	pc, lr
 ENDPROC(cpu_v7_do_idle)
@@ -85,7 +94,7 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	orr	r0, r0, #TTB_FLAGS
 	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
 	isb
 1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -162,6 +171,11 @@ cpu_v7_name:
 *	- cache type register is implemented
 */
 __v7_setup:
+#ifdef CONFIG_SMP
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	orr	r0, r0, #(0x1 << 6)
+	mcr	p15, 0, r0, c1, c0, 1
+#endif
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
@@ -174,8 +188,7 @@ __v7_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
-	mcr	p15, 0, r4, c2, c0, 0		@ load TTB0
+	orr	r4, r4, #TTB_FLAGS
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
...
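For reference, the two TTB_FLAGS variants work out to small constants ORed into the translation table base register; a worked computation in plain C (the values mirror the defines added above):

```c
#include <stdio.h>

/* Mirrors the TTBR attribute bits defined in the hunk above. */
#define TTB_C		(1 << 0)	/* cacheable page table walks */
#define TTB_S		(1 << 1)	/* shareable */
#define TTB_RGN_OC_WBWA	(1 << 3)	/* outer write-back, write-allocate */
#define TTB_RGN_OC_WB	(3 << 3)	/* outer write-back */

int main(void)
{
	/* Worked values of the UP and SMP TTB_FLAGS variants. */
	printf("UP  TTB_FLAGS = 0x%x\n", TTB_C | TTB_RGN_OC_WB);		/* 0x19 */
	printf("SMP TTB_FLAGS = 0x%x\n", TTB_C | TTB_S | TTB_RGN_OC_WBWA);	/* 0x0b */
	return 0;
}
```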
@@ -101,9 +101,12 @@ ENTRY(vfp_support_entry)
 	VFPFSTMIA r4, r5		@ save the working registers
 	VFPFMRX	r5, FPSCR		@ current status
 	tst	r1, #FPEXC_EX		@ is there additional state to save?
-	VFPFMRX	r6, FPINST, NE		@ FPINST (only if FPEXC.EX is set)
-	tstne	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
-	VFPFMRX	r8, FPINST2, NE		@ FPINST2 if needed (and present)
+	beq	1f
+	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
+	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
+	beq	1f
+	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
+1:
 	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
 					@ and point r4 at the word at the
 					@ start of the register dump
@@ -117,9 +120,12 @@ no_old_VFP_process:
 					@ FPEXC is in a safe state
 	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
 	tst	r1, #FPEXC_EX		@ is there additional state to restore?
-	VFPFMXR	FPINST, r6, NE		@ restore FPINST (only if FPEXC.EX is set)
-	tstne	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
-	VFPFMXR	FPINST2, r8, NE		@ FPINST2 if needed (and present)
+	beq	1f
+	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
+	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
+	beq	1f
+	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
+1:
 	VFPFMXR	FPSCR, r5		@ restore status
 
 check_for_exception:
@@ -175,9 +181,12 @@ ENTRY(vfp_save_state)
 	VFPFSTMIA r0, r2		@ save the working registers
 	VFPFMRX	r2, FPSCR		@ current status
 	tst	r1, #FPEXC_EX		@ is there additional state to save?
-	VFPFMRX	r3, FPINST, NE		@ FPINST (only if FPEXC.EX is set)
-	tstne	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
-	VFPFMRX	r12, FPINST2, NE	@ FPINST2 if needed (and present)
+	beq	1f
+	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
+	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
+	beq	1f
+	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
+1:
 	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
 	mov	pc, lr
 ENDPROC(vfp_save_state)
...
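The three hunks above share one shape: conditional VFPFMRX/VFPFMXR transfers guarded with NE become explicit branches around unconditional transfers, so FPINST and FPINST2 are only touched when the FPEXC flags say they hold valid state. Rendered as C for clarity (a sketch only; the bit values and accessor names here are illustrative stand-ins, not the kernel's definitions):

```c
/* Sketch only: bit values and accessors are illustrative stand-ins. */
#define FPEXC_EX	(1u << 31)	/* exception state (FPINST) valid */
#define FPEXC_FP2V	(1u << 28)	/* FPINST2 valid */

static unsigned int read_fpinst(void)  { return 0; }	/* stands in for VFPFMRX */
static unsigned int read_fpinst2(void) { return 0; }

struct vfp_state {
	unsigned int fpexc, fpscr, fpinst, fpinst2;
};

static void save_exception_regs(unsigned int fpexc, struct vfp_state *s)
{
	s->fpexc = fpexc;
	if (fpexc & FPEXC_EX) {			/* was: "beq 1f" when clear */
		s->fpinst = read_fpinst();
		if (fpexc & FPEXC_FP2V)		/* the second "beq 1f" */
			s->fpinst2 = read_fpinst2();
	}
}
```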
@@ -371,6 +371,15 @@ static int __init vfp_init(void)
 		 * in place; report VFP support to userspace.
 		 */
 		elf_hwcap |= HWCAP_VFP;
+#ifdef CONFIG_NEON
+		/*
+		 * Check for the presence of the Advanced SIMD
+		 * load/store instructions, integer and single
+		 * precision floating point operations.
+		 */
+		if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+			elf_hwcap |= HWCAP_NEON;
+#endif
 	}
 	return 0;
 }
...
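The magic mask and value test three 4-bit MVFR1 fields at once. Decoded field by field it reads as follows (a sketch; the helper names are mine, with field positions per the ARMv7 MVFR1 layout):

```c
/* Field-by-field view of the MVFR1 test above (helper names are mine). */
#define MVFR1_ASIMD_LDST(v)	(((v) >>  8) & 0xf)	/* A_SIMD load/store */
#define MVFR1_ASIMD_INT(v)	(((v) >> 12) & 0xf)	/* A_SIMD integer ops */
#define MVFR1_ASIMD_SPFP(v)	(((v) >> 16) & 0xf)	/* A_SIMD single precision */

static int cpu_has_neon(unsigned int mvfr1)
{
	/* (mvfr1 & 0x000fff00) == 0x00011100 is equivalent to requiring
	 * each of the three fields to read exactly 1. */
	return MVFR1_ASIMD_LDST(mvfr1) == 1 &&
	       MVFR1_ASIMD_INT(mvfr1)  == 1 &&
	       MVFR1_ASIMD_SPFP(mvfr1) == 1;
}
```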