Commit 14b34661 authored by David Gibson, committed by Paul Mackerras

[PATCH] Invert sense of SLB class bit

Currently, we set the class bit in kernel SLB entries, and clear it on
user SLB entries.  On POWER5, ERAT entries created in real mode have
the class bit clear.  So to avoid flushing kernel ERAT entries on each
context switch, this patch inverts our usage of the class bit, setting
it on user SLB entries and clearing it on kernel SLB entries.

Booted on POWER5 and G5.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 0fdf0b86
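
For illustration only, a minimal standalone C sketch of the relationship the patch sets up: user SLB entries now carry the class bit (SLB_VSID_C) and the slbie operand built for user segments carries the matching SLBIE_C bit. The helper name user_slbie_operand() is hypothetical, and SID_SHIFT = 28 (256MB segments) is assumed from the ppc64 headers of this era; SLB_VSID_C and SLBIE_C are taken from the diff below.

```c
#include <stdio.h>

/* From the patched include/asm-ppc64/mmu.h. */
#define SLB_VSID_C  0x0000000000000080UL  /* class bit in the VSID word */
#define SLBIE_C     0x08000000UL          /* class bit in the slbie operand */
/* Assumed: 256MB segments, as in the ppc64 headers of this era. */
#define SID_SHIFT   28

/* Hypothetical helper mirroring flush_low_segments()/switch_slb() after
 * the patch: the slbie operand for a user segment ESID also sets the
 * class bit, matching the class bit now set in user SLB entries. */
static unsigned long user_slbie_operand(unsigned long esid)
{
	return (esid << SID_SHIFT) | SLBIE_C;
}

int main(void)
{
	printf("class bit in VSID word:    %#lx\n", SLB_VSID_C);
	printf("slbie operand for ESID 5:  %#lx\n", user_slbie_operand(5));
	return 0;
}
```

Kernel SLB entries, in contrast, are built without SLB_VSID_C, so real-mode ERAT entries (which have the class bit clear) no longer need to be flushed on context switch.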
@@ -400,15 +400,14 @@ BEGIN_FTR_SECTION
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
 	beq	2f		/* if yes, don't slbie it */
-	oris	r0,r6,0x0800	/* set C (class) bit */
 
 	/* Bolt in the new stack SLB entry */
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r6,r6,(SLB_ESID_V)@h
-	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
-	slbie	r0
-	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
-	slbmte	r7,r6
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+	slbie	r6
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+	slbmte	r7,r0
 	isync
 
 2:
...
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
 		if (! (areas & (1U << i)))
 			continue;
-		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+		asm volatile("slbie %0"
+			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				     :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+				     :: "r" (((i << HTLB_AREA_SHIFT)
+					   + (j << SID_SHIFT)) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
...
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = (unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT;
+			esid_data = ((unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT) | SLBIE_C;
 			asm volatile("slbie %0" : : "r" (esid_data));
 		}
 		asm volatile("isync" : : : "memory");
...
@@ -54,8 +54,10 @@ extern char initial_stab[];
 #define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
 #define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */
 
-#define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)
+#define SLB_VSID_KERNEL		(SLB_VSID_KP)
+#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C			(0x08000000)
 
 /*
  * Hash table
...