Commit 1fdc08ab authored by Russell King

ARM: decompressor: avoid speculative prefetch from non-RAM areas

We set up identity MMU mappings across the entire 4GB address space, and
these mappings are permissionless because the domain is set to manager.

This unfortunately allows ARMv6 and later CPUs to speculatively
prefetch from the entire address space, which can cause undesirable
side effects if those regions contain devices.

As we set up the mappings with read/write permissions, we can switch
the domain to client mode, and then use the XN bit on ARMv6 and
above to control speculative prefetch from non-RAM areas.
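
For illustration, here is a minimal C sketch (the macro names are mine,
not from the kernel source) of the short-descriptor section bits involved:
RAM sections get C/B and full AP, while non-RAM sections get XN so that,
once the domain is a client, the CPU may no longer speculatively fetch
from them:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTION     (1u << 1)   /* bits[1:0] = 0b10: section entry */
    #define BUFFERABLE  (1u << 2)   /* B */
    #define CACHEABLE   (1u << 3)   /* C */
    #define SECTION_XN  (1u << 4)   /* XN on ARMv6+ (the "U" bit on older cores) */
    #define AP_RW       (3u << 10)  /* AP = 0b11: read/write access */

    int main(void)
    {
        /* RAM: cacheable, bufferable, XN clear */
        uint32_t ram    = SECTION | CACHEABLE | BUFFERABLE | AP_RW;
        /* non-RAM: XN set, so no speculative instruction fetch from it */
        uint32_t nonram = SECTION | SECTION_XN | AP_RW;

        printf("RAM section bits:     %#010x\n", ram);    /* 0x00000c0e */
        printf("non-RAM section bits: %#010x\n", nonram); /* 0x00000c12 */
        return 0;
    }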
Reported-by: R Sricharan <r.sricharan@ti.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent bd0493ea
@@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
 	mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 	mov	pc, lr
 
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 	bic	r3, r3, #0xff		@ Align the pointer
 	bic	r3, r3, #0x3f00
@@ -578,17 +584,14 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 	mov	r9, r0, lsr #18
 	mov	r9, r9, lsl #18		@ start of RAM
 	add	r10, r9, #0x10000000	@ a reasonable RAM size
-	mov	r1, #0x12
-	orr	r1, r1, #3 << 10
+	mov	r1, #0x12		@ XN|U + section mapping
+	orr	r1, r1, #3 << 10	@ AP=11
 	add	r2, r3, #16384
 1:	cmp	r1, r9			@ if virt > start of RAM
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	orrhs	r1, r1, #0x08		@ set cacheable
-#else
-	orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
-#endif
-	cmp	r1, r10			@ if virt > end of RAM
-	bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
+	cmphs	r10, r1			@   && end of RAM > virt
+	bic	r1, r1, #0x1c		@ clear XN|U + C + B
+	orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
+	orrhs	r1, r1, r6		@ set RAM section settings
 	str	r1, [r0], #4		@ 1:1 mapping
 	add	r1, r1, #1048576
 	teq	r0, r2
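
Read as C, the rewritten loop above does roughly the following; this is a
hedged sketch with my own names (setup_identity_map, ram_flags standing in
for r6), not kernel code:

    #include <stdint.h>

    #define SECT_XN_U  0x10u        /* XN on ARMv6+, "U" bit on older CPUs */
    #define SECT_CB    0x0cu        /* C + B */
    #define AP_RW      (3u << 10)   /* AP = 0b11 */

    /* Fill all 4096 one-MiB first-level entries with a 1:1 mapping:
     * RAM gets the caller's attribute bits (r6 in the asm), everything
     * else is marked XN so it cannot be speculatively prefetched. */
    static void setup_identity_map(uint32_t *pgd, uint32_t ram_start,
                                   uint32_t ram_end, uint32_t ram_flags)
    {
        for (uint32_t i = 0; i < 4096; i++) {
            uint32_t virt = i << 20;              /* 1 MiB per section */
            uint32_t desc = virt | 0x12 | AP_RW;  /* XN|U + section, AP=11 */

            desc &= ~(SECT_XN_U | SECT_CB);       /* clear XN|U + C + B */
            if (virt >= ram_start && virt < ram_end)
                desc |= ram_flags;                /* e.g. CB_BITS | 0x02 on v7 */
            else
                desc |= SECT_XN_U;                /* non-RAM: set XN|U */
            pgd[i] = desc;                        /* 1:1 section mapping */
        }
    }

The asm keeps the descriptor and the virtual address in the same register
(r1) and simply adds 1 MiB per iteration; the sketch separates them for
clarity.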
@@ -599,7 +602,7 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
  * so there is no map overlap problem for up to 1 MB compressed kernel.
  * If the execution is in RAM then we would only be duplicating the above.
  */
-	mov	r1, #0x1e
+	orr	r1, r6, #0x04		@ ensure B is set for this
 	orr	r1, r1, #3 << 10
 	mov	r2, pc
 	mov	r2, r2, lsr #20
@@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
 __armv4_mmu_cache_on:
 	mov	r12, lr
 #ifdef CONFIG_MMU
+	mov	r6, #CB_BITS | 0x12	@ U
 	bl	__setup_mmu
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_MMU
 	mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
 	tst	r11, #0xf		@ VMSA
+	movne	r6, #CB_BITS | 0x02	@ !XN
 	blne	__setup_mmu
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
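
Each cache_on path now preloads r6 with the attribute bits __setup_mmu
should apply to RAM sections. Working the constants through (my
arithmetic, write-back case where CB_BITS is 0x0c):

    #include <stdio.h>

    #define CB_BITS 0x0c    /* write-back variant from the patch */

    int main(void)
    {
        /* ARMv4/FA526/ARM6: section bit + "U" bit + C + B */
        printf("pre-v6 r6 = %#04x\n", CB_BITS | 0x12);  /* 0x1e */
        /* ARMv7: section bit + C + B, bit 4 (XN) left clear for RAM */
        printf("ARMv7  r6 = %#04x\n", CB_BITS | 0x02);  /* 0x0e */
        return 0;
    }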
@@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
 	orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
 	orrne	r0, r0, #1		@ MMU enabled
-	movne	r1, #-1
+	movne	r1, #0xfffffffd		@ domain 0 = client
 	mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 	mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 #endif
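
The movne r1, #0xfffffffd change is what makes the permission bits take
effect: the Domain Access Control Register holds two bits per domain, and
only "client" domains have AP/XN checked. A small sketch of the encoding
(the helper name is mine):

    #include <stdint.h>

    #define DOMAIN_CLIENT   0x1u    /* permissions are checked */
    #define DOMAIN_MANAGER  0x3u    /* no checks: the old behaviour */

    /* Domain 0 (used by all of the decompressor's mappings) becomes a
     * client; domains 1-15 stay managers. Result: 0xfffffffd. */
    static inline uint32_t dacr_value(void)
    {
        uint32_t dacr = 0;
        for (int d = 0; d < 16; d++)
            dacr |= (d == 0 ? DOMAIN_CLIENT : DOMAIN_MANAGER) << (2 * d);
        return dacr;
    }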
@@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
 
 __fa526_cache_on:
 	mov	r12, lr
+	mov	r6, #CB_BITS | 0x12	@ U
 	bl	__setup_mmu
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
@@ -682,6 +688,7 @@ __fa526_cache_on:
 
 __arm6_mmu_cache_on:
 	mov	r12, lr
+	mov	r6, #CB_BITS | 0x12	@ U
 	bl	__setup_mmu
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
...