Commit f9c39b08 authored by Benjamin Herrenschmidt, committed by Paul Mackerras

PPC32: flush the cache more thoroughly on sleep.

parent f80da3c4
@@ -40,9 +40,11 @@
Author: Terry Greeniaus (tgree@phys.ualberta.ca)
Please e-mail updates to this file to me, thanks!
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/cache.h>
/* Usage:
@@ -101,6 +103,8 @@ BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
mflr r9
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
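/* DSSALL stops all active AltiVec data-stream (dst/dstst) prefetch
 * engines; a stream left running could keep pulling new lines into
 * the caches while we are trying to flush and disable them.
 */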
@@ -115,6 +119,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtmsr r4
isync
/* Before we perform the global invalidation, we must disable dynamic
* power management via HID0[DPM] to work around a processor bug where
* DPM can possibly interfere with the state machine in the processor
* that invalidates the L2 cache tags.
*/
mfspr r8,HID0 /* Save HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
mtspr HID0,r4 /* Disable DPM */
sync
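/* Note: rlwinm with SH=0, MB=12, ME=10 builds a wrap-around mask that
 * keeps every bit except bit 11 (0x00100000), i.e. HID0[DPM] -- the
 * usual 32-bit PowerPC idiom for clearing a single bit in place.
 */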
/* Flush & disable L1 */
mr r5,r3
bl __flush_disable_L1
mr r3,r5
/* Get the current enable bit of the L2CR into r4 */
mfspr r4,L2CR
@@ -136,27 +156,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/**** Might be a good idea to set L2DO here - to prevent instructions
from getting into the cache. But since we invalidate
the next time we enable the cache it doesn't really matter.
Don't do this unless you accommodate all processor variations.
The bit moved on the 7450.....
****/
/* TODO: use HW flush assist when available */
-lis r4,0x0004
lis r4,0x0002
mtctr r4
li r4,0
1:
lwzx r0,r0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
isync
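/* The load loop above runs CTR = 0x0002 << 16 = 0x20000 iterations of
 * one 32-byte line each, touching 4MB starting at real address 0
 * (lwzx with r0 in the RA field means a base of literal zero).
 * Presumably this is sized to displace every dirty line from the
 * largest supported L2, forcing writebacks before the explicit flush
 * below.
 */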
/* Now, flush the first 4MB of memory */
-lis r4,0x0004
lis r4,0x0002
mtctr r4
li r4,0
sync
1:
-dcbf r0,r4
dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
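/* dcbf (Data Cache Block Flush) writes the addressed line back to
 * memory if it is modified and then invalidates it; the 0,rB form
 * uses rB alone as the effective address, since an RA field of 0
 * encodes a literal zero base rather than r0.
 */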
@@ -166,25 +187,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* L1 icache
*/
b 20f
-21:
.balign L1_CACHE_LINE_SIZE
22:
sync
mtspr L2CR,r3
sync
-b 22f
b 23f
20:
-b 21b
-22:
-/* Before we perform the global invalidation, we must disable dynamic
- * power management via HID0[DPM] to work around a processor bug where
- * DPM can possibly interfere with the state machine in the processor
- * that invalidates the L2 cache tags.
- */
-mfspr r8,HID0 /* Save HID0 in r8 */
-rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
-sync
-mtspr HID0,r4 /* Disable DPM */
-sync
b 21f
21: sync
isync
b 22b
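/* The branch chain above (20f -> 21f -> sync/isync -> 22b) appears
 * intended to pre-fetch the aligned sequence at 22: into the
 * instruction cache, so the mtspr to L2CR does not race an
 * instruction fetch miss.
 */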
23:
/* Perform a global invalidation */
oris r3,r3,0x0020
sync
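/* 0x0020 in the upper halfword is 0x00200000 = L2CR[L2I], the L2
 * global invalidate bit.
 */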
@@ -211,11 +226,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtspr L2CR,r3
sync
-/* Restore HID0[DPM] to whatever it was before */
-sync
-mtspr 1008,r8
-sync
/* See if we need to enable the cache */
cmplwi r5,0
beq 4f
@@ -225,10 +235,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtspr L2CR,r3
sync
4:
bl __inval_enable_L1
/* Restore HID0[DPM] to whatever it was before */
sync
mtspr 1008,r8
sync
/* Restore MSR (restores EE and DR bits to original state) */
-4: SYNC
SYNC
mtmsr r7
isync
mtlr r9
blr
_GLOBAL(_get_L2CR)
@@ -286,7 +306,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
li r4,0
1:
lwzx r0,r0,r4
-dcbf r0,r4
dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
@@ -360,3 +380,73 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
/* --- End of PowerLogix code ---
*/
/* flush_disable_L1() - Flush and disable L1 cache
*
* clobbers r0, r3, ctr, cr0
*
*/
.globl __flush_disable_L1
__flush_disable_L1:
/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
/* Load the counter with 0x20000 cache lines (4MB) and
 * fill the cache with data
 */
lis r3,0x0002
// li r3,0x1000 /* 128kB / 32B */
mtctr r3
li r3, 0
1:
lwz r0,0(r3)
addi r3,r3,0x0020 /* Go to start of next cache line */
bdnz 1b
isync
sync
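/* Addresses here are real addresses: both callers in this patch clear
 * MSR[DR] first, so the loops walk the first 4MB of physical RAM
 * directly.
 */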
/* Now flush those cache lines */
lis r3,0x0002
// li r3,0x1000 /* 128kB / 32B */
mtctr r3
li r3, 0
1:
dcbf 0,r3
addi r3,r3,0x0020 /* Go to start of next cache line */
bdnz 1b
sync
/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
mfspr r3,SPRN_HID0
rlwinm r3,r3,0,18,15
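/* The wrap-around mask (MB=18, ME=15) clears only bits 16 and 17:
 * HID0[ICE] (0x00008000) and HID0[DCE] (0x00004000).
 */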
mtspr SPRN_HID0,r3
sync
isync
blr
/* inval_enable_L1 - Invalidate and enable L1 cache
*
* Assumes L1 is already disabled and MSR:EE is off
*
* clobbers r3
*/
.globl __inval_enable_L1
__inval_enable_L1:
/* Enable and then flash-invalidate the instruction & data caches */
mfspr r3,SPRN_HID0
ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
sync
isync
mtspr SPRN_HID0,r3
xori r3,r3, HID0_ICFI|HID0_DCI
mtspr SPRN_HID0,r3
sync
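/* Setting ICFI and DCI flash-invalidates both caches in a single
 * operation; the xori then clears the two invalidate bits again while
 * leaving ICE/DCE set, rather than relying on them self-clearing.
 */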
blr
@@ -15,6 +15,7 @@
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/offsets.h>
@@ -157,33 +158,22 @@ _GLOBAL(low_sleep_handler)
addi r3,r3,sleep_storage@l
stw r5,0(r3)
-BEGIN_FTR_SECTION
-DSSALL
/* Disable DPM during cache flush */
mfspr r3, SPRN_HID0
rlwinm r3,r3,0,12,10
sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-/*
- * Flush the L1 data cache by reading the first 128kB of RAM
- * and then flushing the same area with the dcbf instruction.
- * The L2 cache has already been disabled.
- */
-li r4,0x1000 /* 128kB / 32B */
-mtctr r4
-lis r4,KERNELBASE@h
-1:
-lwz r0,0(r4)
-addi r4,r4,0x0020 /* Go to start of next cache line */
-bdnz 1b
mtspr SPRN_HID0,r3
sync
-li r4,0x1000 /* 128kB / 32B */
-mtctr r4
-lis r4,KERNELBASE@h
-1:
-dcbf r0,r4
-addi r4,r4,0x0020 /* Go to start of next cache line */
-bdnz 1b
/* Turn off data relocation. */
mfmsr r3 /* Get current MSR */
rlwinm r3,r3,0,28,26 /* Turn off DR bit */
sync
mtmsr r3
isync
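/* The rlwinm mask (MB=28, ME=26) wraps around and clears only bit 27,
 * MSR[DR] (0x10): from this point data accesses use real addresses.
 */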
/* Flush & disable L1 cache */
bl __flush_disable_L1
/*
* Set the HID0 and MSR for sleep.
@@ -192,6 +182,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
rlwinm r2,r2,0,10,7 /* clear doze, nap */
oris r2,r2,HID0_SLEEP@h
sync
isync
mtspr HID0,r2
sync
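/* HID0[DOZE] (0x00800000) and HID0[NAP] (0x00400000) are cleared and
 * HID0[SLEEP] (0x00200000) set; the chip presumably enters sleep only
 * later, once MSR[POW] is set with this HID0 state in place.
 */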
@@ -252,16 +243,11 @@ _GLOBAL(core99_wake_up)
*/
grackle_wake_up:
-/* Enable and then Flash inval the instruction & data cache */
-mfspr r3,HID0
-ori r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
-sync
-isync
-mtspr HID0,r3
-xori r3,r3, HID0_ICFI|HID0_DCI
-mtspr HID0,r3
-sync
/* Invalidate & enable L1 cache, we don't care about
* whatever the ROM may have tried to write to memory
*/
bl __inval_enable_L1
/* Restore the kernel's segment registers before
* we do any r1 memory access as we are not sure they
* are in a sane state above the first 256Mb region
@@ -274,6 +260,8 @@ grackle_wake_up:
addi r3,r3,0x111 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
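/* Each pass programs one of the 16 segment registers: addis r4,r4,0x1000
 * advances the effective address by 0x10000000 (one 256MB segment), and
 * addi r3,r3,0x111 steps the VSID so every segment gets a distinct value.
 */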
sync
isync
subi r1,r1,SL_PC
@@ -325,6 +313,26 @@ grackle_wake_up:
lwz r4,SL_IBAT3+4(r1)
mtibatl 3,r4
BEGIN_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
mtspr SPRN_DBAT5U,r4
mtspr SPRN_DBAT5L,r4
mtspr SPRN_DBAT6U,r4
mtspr SPRN_DBAT6L,r4
mtspr SPRN_DBAT7U,r4
mtspr SPRN_DBAT7L,r4
mtspr SPRN_IBAT4U,r4
mtspr SPRN_IBAT4L,r4
mtspr SPRN_IBAT5U,r4
mtspr SPRN_IBAT5L,r4
mtspr SPRN_IBAT6U,r4
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
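/* CPUs with CPU_FTR_HAS_HIGH_BATS (e.g. the 745x family) have four
 * extra BAT pairs; zeroing both words leaves their valid bits clear,
 * so no stale translations survive the wakeup path.
 */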
/* Flush all TLBs */
lis r4,0x1000
1: addic. r4,r4,-0x1000
@@ -368,8 +376,9 @@ turn_on_mmu:
#endif /* defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ) */
-.data
.globl sleep_storage
.section .data
.balign L1_CACHE_LINE_SIZE
sleep_storage:
.long 0
.balign L1_CACHE_LINE_SIZE, 0
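/* Padding sleep_storage out to a full cache line presumably keeps it
 * alone in its line, so flushes or invalidations of neighbouring data
 * cannot clobber it while the caches are being turned off and on.
 */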
.section .text