powerpc: Replace mfmsr instructions with load from PACA kernel_msr field

On 64-bit, the mfmsr instruction can be quite slow, slower
than loading a field from the cache-hot PACA, which happens
to already contain the value we want in most cases.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

parent 9424fabf
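For background: in the 64-bit kernel r13 always points to the per-CPU PACA, and PACAKMSR is the asm-offsets constant for its kernel_msr field, so the new sequences are a single load from a structure that is almost always cache-hot, while mfmsr reads the Machine State Register from hardware and can stall. A minimal C sketch of the pattern, using identifiers that appear in the diff (the old/new wrapper names are only illustrative, not kernel code):

/*
 * Sketch only: what __hard_irq_disable() computes before and after this
 * patch.  local_paca, mfmsr(), __mtmsrd(), MSR_EE and the kernel_msr
 * field are real kernel identifiers; the wrappers are for illustration.
 */
static inline void hard_irq_disable_old(void)
{
	__mtmsrd(mfmsr() & ~MSR_EE, 1);		/* SPR read on every call */
}

static inline void hard_irq_disable_new(void)
{
	__mtmsrd(local_paca->kernel_msr, 1);	/* cache-hot PACA load, EE already clear */
}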
@@ -298,7 +298,7 @@ label##_hv: \
 /* Exception addition: Keep interrupt state */
 #define ENABLE_INTS				\
-	mfmsr	r11;				\
+	ld	r11,PACAKMSR(r13);		\
 	ld	r12,_MSR(r1);			\
 	rlwimi	r11,r12,0,MSR_EE;		\
 	mtmsrd	r11,1
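A note on the ENABLE_INTS hunk above: the macro now starts from the kernel MSR cached in the PACA instead of the live MSR, and rlwimi copies only the EE bit out of the MSR saved at exception entry (_MSR(r1)); mtmsrd with the ,1 (L=1) operand then updates just the EE and RI bits. Roughly, in C (the helper name is illustrative and regs stands for the saved exception frame):

/* Sketch of the value ENABLE_INTS hands to mtmsrd ...,1 */
static inline unsigned long enable_ints_msr(struct pt_regs *regs)
{
	unsigned long msr = local_paca->kernel_msr;	/* kernel MSR, EE clear */

	/* rlwimi r11,r12,0,MSR_EE: insert the saved EE bit */
	return (msr & ~(unsigned long)MSR_EE) | (regs->msr & MSR_EE);
}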
@@ -68,8 +68,8 @@ static inline bool arch_irqs_disabled(void)
 #define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory");
 #define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory");
 #else
-#define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
-#define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
+#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
+#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
 #endif

 #define hard_irq_disable()		\
@@ -557,10 +557,8 @@ _GLOBAL(ret_from_except_lite)
 #ifdef CONFIG_PPC_BOOK3E
 	wrteei	0
 #else
-	mfmsr	r10		/* Get current interrupt state */
-	rldicl	r9,r10,48,1	/* clear MSR_EE */
-	rotldi	r9,r9,16
-	mtmsrd	r9,1		/* Update machine state */
+	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+	mtmsrd	r10,1		  /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */

 #ifdef CONFIG_PREEMPT
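The pair of instructions removed above (rldicl r9,r10,48,1 then rotldi r9,r9,16) is just a two-step way of clearing MSR_EE: the first rotate parks the EE bit at the position the mask discards, and the second rotate completes a full 64-bit rotation so every other bit lands back where it started. Because the PACA kernel MSR is kept with EE clear, one load replaces the whole sequence; the do_work hunk further down gets the identical treatment. A small C sketch of the equivalence (illustrative only, not kernel code):

/* MSR_EE is IBM bit 48, i.e. the 0x8000 bit of the 64-bit MSR. */
static inline unsigned long old_clear_ee(unsigned long msr)
{
	unsigned long r = (msr << 48) | (msr >> 16);	/* rotate left by 48 */
	r &= ~(1UL << 63);				/* rldicl mask drops IBM bit 0 (was EE) */
	return (r << 16) | (r >> 48);			/* rotate left by 16: 48+16 = 64 */
}
/* ...which is simply msr & ~MSR_EE, the value now read from PACAKMSR. */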
@@ -625,8 +623,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 	 * userspace and we take an exception after restoring r13,
 	 * we end up corrupting the userspace r13 value.
 	 */
-	mfmsr	r4
+	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
 	andc	r4,r4,r0	/* r0 contains MSR_RI here */
 	mtmsrd	r4,1

 	/*
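In the hunk above, r0 already holds MSR_RI (see the existing comment), so the andc still clears RI; the only change is that the base value now comes from the PACA kernel MSR rather than from mfmsr. Interrupts are expected to be hard-disabled on this return path, so starting from a value with EE already clear should be equivalent. As a one-line sketch (helper name is illustrative):

/* Value written by mtmsrd just before r13 is restored */
static inline unsigned long msr_for_r13_restore(void)
{
	return local_paca->kernel_msr & ~(unsigned long)MSR_RI;	/* andc with r0 == MSR_RI */
}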
@@ -686,9 +684,7 @@ do_work:
 #ifdef CONFIG_PPC_BOOK3E
 	wrteei	0
 #else
-	mfmsr	r10
-	rldicl	r10,r10,48,1
-	rotldi	r10,r10,16
+	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
 	li	r0,0
@@ -848,9 +848,8 @@ fast_exception_return:
 	REST_GPR(0, r1)
 	REST_8GPRS(2, r1)

-	mfmsr	r10
-	rldicl	r10,r10,48,1	/* clear EE */
-	rldicr	r10,r10,16,61	/* clear RI (LE is 0 already) */
+	ld	r10,PACAKMSR(r13)
+	clrrdi	r10,r10,2	/* clear RI */
 	mtmsrd	r10,1

 	mtspr	SPRN_SRR1,r12
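In fast_exception_return the old code cleared both EE and RI out of the live MSR; with the PACA value EE is already clear, so clrrdi r10,r10,2 suffices. It zeroes the two low-order bits, MSR_RI (0x2) and MSR_LE (0x1), which matches the old comment noting that LE was already 0. Roughly, in C (helper name is illustrative):

/* What "ld r10,PACAKMSR(r13); clrrdi r10,r10,2" leaves in r10 */
static inline unsigned long fast_ret_msr(void)
{
	return local_paca->kernel_msr & ~(MSR_RI | MSR_LE);
}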