Commit f7354cca authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32: Remove CURRENT_THREAD_INFO and rename TI_CPU

Now that the thread_info is located inside the task_struct, whose address is
kept in r2, the CURRENT_THREAD_INFO() macro is useless. This patch removes it.
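The typical conversion, taken from the hunks below, replaces the
two-instruction sequence (and its scratch register) with a single load
relative to r2:

	/* before: derive the thread_info pointer, then load from it */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)

	/* after: thread_info sits inside task_struct, whose address is already in r2 */
	lwz	r9,TI_FLAGS(r2)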

This patch also moves the 'tovirt(r2, r2)' down to just before the
reactivation of MMU translation, so that the physical address of 'current'
is kept in r2 until then. This avoids a few calls to tophys().
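A minimal sketch of the resulting flow in transfer_to_handler, condensed
from the hunks below ('...' marks omitted code):

	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD		/* r2 = 'current', still a physical address */
	...
	lwz	r12,TI_LOCAL_FLAGS(r2)	/* MMU still off: use r2 directly, no tophys() */
	...
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt(r2, r2)			/* make r2 virtual only now, right before the MMU is re-enabled */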

At the same time, as the 'cpu' field is no longer in thread_info, this
patch renames TI_CPU to TASK_CPU.
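With the cpu number now read directly off the task_struct pointer in r2,
the SMP call sites below shrink to a single load, for example when indexing
the per-cpu global_dbcr0 slots:

	lwz	r9,TASK_CPU(r2)		/* cpu number of 'current' */
	slwi	r9,r9,3			/* times 8: stride of one global_dbcr0 slot */
	add	r11,r11,r9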

It also allows getting rid of a couple of
'#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE' blocks, as ACCOUNT_CPU_USER_ENTRY()
and ACCOUNT_CPU_USER_EXIT() are empty when
CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not defined.
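That works because of the shape of those macros; a simplified sketch, not
the exact kernel definitions:

  /*
   * Sketch: when CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set, both macros
   * expand to nothing, so call sites can invoke them unconditionally
   * instead of wrapping each call in its own #ifdef.
   */
  #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
  #define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
  #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
  #else
  /* real definitions: timebase-based accounting of user/system time */
  #endif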
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Fix a missed conversion of TI_CPU in idle_6xx.S]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7c19c2e5
@@ -431,7 +431,7 @@ ifdef CONFIG_SMP
 prepare: task_cpu_prepare
 task_cpu_prepare: prepare0
-	$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TI_CPU") print $$3;}' include/generated/asm-offsets.h))
+	$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
 endif
 # Check toolchain versions:
......
@@ -19,8 +19,6 @@
 #ifdef CONFIG_PPC64
 #define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(ld dest, PACACURRENT(r13))
-#else
-#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(mr dest, r2)
 #endif
 #ifndef __ASSEMBLY__
......
@@ -99,7 +99,7 @@ int main(void)
 #endif /* CONFIG_PPC64 */
 	OFFSET(TASK_STACK, task_struct, stack);
 #ifdef CONFIG_SMP
-	OFFSET(TI_CPU, task_struct, cpu);
+	OFFSET(TASK_CPU, task_struct, cpu);
 #endif
 #ifdef CONFIG_LIVEPATCH
......
@@ -151,7 +151,6 @@ transfer_to_handler:
 	stw	r2,_XER(r11)
 	mfspr	r12,SPRN_SPRG_THREAD
 	addi	r2,r12,-THREAD
-	tovirt(r2,r2)			/* set r2 to current */
 	beq	2f			/* if from user, fix up THREAD.regs */
 	addi	r11,r1,STACK_FRAME_OVERHEAD
 	stw	r11,PT_REGS(r12)
@@ -161,11 +160,7 @@ transfer_to_handler:
 	lwz	r12,THREAD_DBCR0(r12)
 	andis.	r12,r12,DBCR0_IDM@h
 #endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	CURRENT_THREAD_INFO(r9, r1)
-	tophys(r9, r9)
-	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
-#endif
+	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 	beq+	3f
 	/* From user and task is ptraced - load up global dbcr0 */
@@ -175,8 +170,7 @@ transfer_to_handler:
 	tophys(r11,r11)
 	addi	r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r9,TI_CPU(r9)
+	lwz	r9,TASK_CPU(r2)
 	slwi	r9,r9,3
 	add	r11,r11,r9
 #endif
@@ -197,9 +191,7 @@ transfer_to_handler:
 	ble-	stack_ovf		/* then the kernel stack overflowed */
 5:
 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
-	CURRENT_THREAD_INFO(r9, r1)
-	tophys(r9,r9)			/* check local flags */
-	lwz	r12,TI_LOCAL_FLAGS(r9)
+	lwz	r12,TI_LOCAL_FLAGS(r2)
 	mtcrf	0x01,r12
 	bt-	31-TLF_NAPPING,4f
 	bt-	31-TLF_SLEEPING,7f
@@ -208,6 +200,7 @@ transfer_to_handler:
 transfer_to_handler_cont:
 3:
 	mflr	r9
+	tovirt(r2, r2)			/* set r2 to current */
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
@@ -271,11 +264,11 @@ reenable_mmu: /* re-enable mmu so we can */
 #if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
 4:	rlwinm	r12,r12,0,~_TLF_NAPPING
-	stw	r12,TI_LOCAL_FLAGS(r9)
+	stw	r12,TI_LOCAL_FLAGS(r2)
 	b	power_save_ppc32_restore
 7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
-	stw	r12,TI_LOCAL_FLAGS(r9)
+	stw	r12,TI_LOCAL_FLAGS(r2)
 	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
 	rlwinm	r9,r9,0,~MSR_EE
 	lwz	r12,_LINK(r11)		/* and return to address in LR */
@@ -347,8 +340,7 @@ _GLOBAL(DoSyscall)
 	mtmsr	r11
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
-	CURRENT_THREAD_INFO(r10, r1)
-	lwz	r11,TI_FLAGS(r10)
+	lwz	r11,TI_FLAGS(r2)
 	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
 	bne-	syscall_dotrace
 syscall_dotrace_cont:
@@ -381,13 +373,12 @@ ret_from_syscall:
 	lwz	r3,GPR3(r1)
 #endif
 	mr	r6,r3
-	CURRENT_THREAD_INFO(r12, r1)
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
 	/* Note: We don't bother telling lockdep about it */
 	SYNC
 	MTMSRD(r10)
-	lwz	r9,TI_FLAGS(r12)
+	lwz	r9,TI_FLAGS(r2)
 	li	r8,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
@@ -434,8 +425,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	andi.	r4,r8,MSR_PR
 	beq	3f
-	CURRENT_THREAD_INFO(r4, r1)
-	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
+	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
 3:
 #endif
 	lwz	r4,_LINK(r1)
@@ -528,7 +518,7 @@ syscall_exit_work:
 	/* Clear per-syscall TIF flags if any are set.  */
 	li	r11,_TIF_PERSYSCALL_MASK
-	addi	r12,r12,TI_FLAGS
+	addi	r12,r2,TI_FLAGS
 3:	lwarx	r8,0,r12
 	andc	r8,r8,r11
 #ifdef CONFIG_IBM405_ERR77
@@ -536,7 +526,6 @@ syscall_exit_work:
 #endif
 	stwcx.	r8,0,r12
 	bne-	3b
-	subi	r12,r12,TI_FLAGS
 4:	/* Anything which requires enabling interrupts? */
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -815,8 +804,7 @@ ret_from_except:
 user_exc_return:		/* r10 contains MSR_KERNEL here */
 	/* Check current_thread_info()->flags */
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r9,TI_FLAGS(r9)
+	lwz	r9,TI_FLAGS(r2)
 	andi.	r0,r9,_TIF_USER_WORK_MASK
 	bne	do_work
@@ -828,18 +816,14 @@ restore_user:
 	andis.	r10,r0,DBCR0_IDM@h
 	bnel-	load_dbcr0
 #endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	CURRENT_THREAD_INFO(r9, r1)
-	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
-#endif
+	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
 	b	restore
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r8,TI_FLAGS(r9)
+	lwz	r8,TI_FLAGS(r2)
 	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
 	beq+	1f
@@ -865,7 +849,7 @@ resume_kernel:
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
 	lis	r11,_TIF_EMULATE_STACK_STORE@h
-	addi	r5,r9,TI_FLAGS
+	addi	r5,r2,TI_FLAGS
 0:	lwarx	r8,0,r5
 	andc	r8,r8,r11
 #ifdef CONFIG_IBM405_ERR77
@@ -877,7 +861,7 @@ resume_kernel:
 #ifdef CONFIG_PREEMPT
 	/* check current_thread_info->preempt_count */
-	lwz	r0,TI_PREEMPT(r9)
+	lwz	r0,TI_PREEMPT(r2)
 	cmpwi	0,r0,0			/* if non-zero, just restore regs and return */
 	bne	restore
 	andi.	r8,r8,_TIF_NEED_RESCHED
@@ -893,8 +877,7 @@ resume_kernel:
 	bl	trace_hardirqs_off
 #endif
 1:	bl	preempt_schedule_irq
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r3,TI_FLAGS(r9)
+	lwz	r3,TI_FLAGS(r2)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -1190,8 +1173,7 @@ load_dbcr0:
 	lis	r11,global_dbcr0@ha
 	addi	r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r9,TI_CPU(r9)
+	lwz	r9,TASK_CPU(r2)
 	slwi	r9,r9,3
 	add	r11,r11,r9
 #endif
@@ -1231,8 +1213,7 @@ recheck:
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)			/* disable interrupts */
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r9,TI_FLAGS(r9)
+	lwz	r9,TI_FLAGS(r2)
 	andi.	r0,r9,_TIF_NEED_RESCHED
 	bne-	do_resched
 	andi.	r0,r9,_TIF_USER_WORK_MASK
......
@@ -21,10 +21,9 @@
 #ifndef CONFIG_PPC64
 /* epapr_ev_idle() was derived from e500_idle() */
 _GLOBAL(epapr_ev_idle)
-	CURRENT_THREAD_INFO(r3, r1)
-	PPC_LL	r4, TI_LOCAL_FLAGS(r3)	/* set napping bit */
+	PPC_LL	r4, TI_LOCAL_FLAGS(r2)	/* set napping bit */
 	ori	r4, r4,_TLF_NAPPING	/* so when we take an exception */
-	PPC_STL	r4, TI_LOCAL_FLAGS(r3)	/* it will return to our caller */
+	PPC_STL	r4, TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
 	wrteei	1
......
@@ -244,8 +244,7 @@ set_ivor:
 	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r22, r1)
-	stw	r24, TI_CPU(r22)
+	stw	r24, TASK_CPU(r2)
 #endif
 	bl	early_init
@@ -719,7 +718,7 @@ finish_tlb_load:
 	/* Get the next_tlbcam_idx percpu var */
 #ifdef CONFIG_SMP
-	lwz	r15, TI_CPU-THREAD(r12)
+	lwz	r15, TASK_CPU-THREAD(r12)
 	lis	r14, __per_cpu_offset@h
 	ori	r14, r14, __per_cpu_offset@l
 	rlwinm	r15, r15, 2, 0, 29
......
@@ -136,10 +136,9 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	CURRENT_THREAD_INFO(r9, r1)
-	lwz	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
+	lwz	r8,TI_LOCAL_FLAGS(r2)	/* set napping bit */
 	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
-	stw	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
+	stw	r8,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
 	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
@@ -159,9 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
 	stw	r9,_NIP(r11)		/* make it do a blr */
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r12, r1)
-	tophys(r12, r12)
-	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
+	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
 	slwi	r11,r11,2
 #else
 	li	r11,0
......
@@ -22,10 +22,9 @@
 	.text
 _GLOBAL(e500_idle)
-	CURRENT_THREAD_INFO(r3, r1)
-	lwz	r4,TI_LOCAL_FLAGS(r3)	/* set napping bit */
+	lwz	r4,TI_LOCAL_FLAGS(r2)	/* set napping bit */
 	ori	r4,r4,_TLF_NAPPING	/* so when we take an exception */
-	stw	r4,TI_LOCAL_FLAGS(r3)	/* it will return to our caller */
+	stw	r4,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
 #ifdef CONFIG_PPC_E500MC
 	wrteei	1
@@ -88,8 +87,7 @@ _GLOBAL(power_save_ppc32_restore)
 	stw	r9,_NIP(r11)		/* make it do a blr */
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r12, r1)
-	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
+	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
 	slwi	r11,r11,2
 #else
 	li	r11,0
......
@@ -183,8 +183,7 @@ _GLOBAL(low_choose_750fx_pll)
 #ifdef CONFIG_SMP
 	/* Store new HID1 image */
-	CURRENT_THREAD_INFO(r6, r1)
-	lwz	r6,TI_CPU(r6)
+	lwz	r6,TASK_CPU(r2)
 	slwi	r6,r6,2
 #else
 	li	r6, 0
......
@@ -183,8 +183,7 @@ _GLOBAL(add_hash_page)
 	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r8, r1)	/* use cpu number to make tag */
-	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
+	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
 	oris	r8,r8,12
 #endif /* CONFIG_SMP */
@@ -540,9 +539,7 @@ _GLOBAL(flush_hash_pages)
 #ifdef CONFIG_SMP
 	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
 	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
-	CURRENT_THREAD_INFO(r8, r1)
-	tophys(r8, r8)
-	lwz	r8,TI_CPU(r8)
+	lwz	r8,TASK_CPU(r2)
 	oris	r8,r8,9
 10:	lwarx	r0,0,r9
 	cmpi	0,r0,0
@@ -637,8 +634,7 @@ EXPORT_SYMBOL(flush_hash_pages)
  */
 _GLOBAL(_tlbie)
 #ifdef CONFIG_SMP
-	CURRENT_THREAD_INFO(r8, r1)
-	lwz	r8,TI_CPU(r8)
+	lwz	r8,TASK_CPU(r2)
 	oris	r8,r8,11
 	mfmsr	r10
 	SYNC
@@ -675,8 +671,7 @@ _GLOBAL(_tlbie)
  */
 _GLOBAL(_tlbia)
 #if defined(CONFIG_SMP)
-	CURRENT_THREAD_INFO(r8, r1)
-	lwz	r8,TI_CPU(r8)
+	lwz	r8,TASK_CPU(r2)
 	oris	r8,r8,10
 	mfmsr	r10
 	SYNC
......
@@ -29,10 +29,9 @@ _GLOBAL(mpc6xx_enter_standby)
 	ori	r5, r5, ret_from_standby@l
 	mtlr	r5
-	CURRENT_THREAD_INFO(r5, r1)
-	lwz	r6, TI_LOCAL_FLAGS(r5)
+	lwz	r6, TI_LOCAL_FLAGS(r2)
 	ori	r6, r6, _TLF_SLEEPING
-	stw	r6, TI_LOCAL_FLAGS(r5)
+	stw	r6, TI_LOCAL_FLAGS(r2)
 	mfmsr	r5
 	ori	r5, r5, MSR_EE
......