Commit b86fd2bd authored by Anton Blanchard, committed by Michael Ellerman

powerpc: Simplify TM restore checks

Instead of having multiple giveup_*_maybe_transactional() functions,
separate out the TM check into a new function called
check_if_tm_restore_required().

This will make it easier to optimise the giveup_*() functions in a
subsequent patch.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent af1bbc3d
...@@ -68,7 +68,7 @@ ...@@ -68,7 +68,7 @@
extern unsigned long _get_SP(void); extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void giveup_fpu_maybe_transactional(struct task_struct *tsk) static void check_if_tm_restore_required(struct task_struct *tsk)
{ {
/* /*
* If we are saving the current thread's registers, and the * If we are saving the current thread's registers, and the
...@@ -82,31 +82,9 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk) ...@@ -82,31 +82,9 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
set_thread_flag(TIF_RESTORE_TM); set_thread_flag(TIF_RESTORE_TM);
} }
giveup_fpu(tsk);
}
void giveup_altivec_maybe_transactional(struct task_struct *tsk)
{
/*
* If we are saving the current thread's registers, and the
* thread is in a transactional state, set the TIF_RESTORE_TM
* bit so that we know to restore the registers before
* returning to userspace.
*/
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
set_thread_flag(TIF_RESTORE_TM);
}
giveup_altivec(tsk);
} }
#else #else
#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
...@@ -135,7 +113,8 @@ void flush_fp_to_thread(struct task_struct *tsk) ...@@ -135,7 +113,8 @@ void flush_fp_to_thread(struct task_struct *tsk)
* to still have its FP state in the CPU registers. * to still have its FP state in the CPU registers.
*/ */
BUG_ON(tsk != current); BUG_ON(tsk != current);
giveup_fpu_maybe_transactional(tsk); check_if_tm_restore_required(tsk);
giveup_fpu(tsk);
} }
preempt_enable(); preempt_enable();
} }
...@@ -147,10 +126,12 @@ void enable_kernel_fp(void) ...@@ -147,10 +126,12 @@ void enable_kernel_fp(void)
{ {
WARN_ON(preemptible()); WARN_ON(preemptible());
if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
giveup_fpu_maybe_transactional(current); check_if_tm_restore_required(current);
else giveup_fpu(current);
} else {
giveup_fpu(NULL); /* just enables FP for kernel */ giveup_fpu(NULL); /* just enables FP for kernel */
}
} }
EXPORT_SYMBOL(enable_kernel_fp); EXPORT_SYMBOL(enable_kernel_fp);
...@@ -159,10 +140,12 @@ void enable_kernel_altivec(void) ...@@ -159,10 +140,12 @@ void enable_kernel_altivec(void)
{ {
WARN_ON(preemptible()); WARN_ON(preemptible());
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
giveup_altivec_maybe_transactional(current); check_if_tm_restore_required(current);
else giveup_altivec(current);
} else {
giveup_altivec_notask(); giveup_altivec_notask();
}
} }
EXPORT_SYMBOL(enable_kernel_altivec); EXPORT_SYMBOL(enable_kernel_altivec);
...@@ -176,7 +159,8 @@ void flush_altivec_to_thread(struct task_struct *tsk) ...@@ -176,7 +159,8 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_disable(); preempt_disable();
if (tsk->thread.regs->msr & MSR_VEC) { if (tsk->thread.regs->msr & MSR_VEC) {
BUG_ON(tsk != current); BUG_ON(tsk != current);
giveup_altivec_maybe_transactional(tsk); check_if_tm_restore_required(tsk);
giveup_altivec(tsk);
} }
preempt_enable(); preempt_enable();
} }
...@@ -198,8 +182,9 @@ EXPORT_SYMBOL(enable_kernel_vsx); ...@@ -198,8 +182,9 @@ EXPORT_SYMBOL(enable_kernel_vsx);
void giveup_vsx(struct task_struct *tsk) void giveup_vsx(struct task_struct *tsk)
{ {
giveup_fpu_maybe_transactional(tsk); check_if_tm_restore_required(tsk);
giveup_altivec_maybe_transactional(tsk); giveup_fpu(tsk);
giveup_altivec(tsk);
__giveup_vsx(tsk); __giveup_vsx(tsk);
} }
EXPORT_SYMBOL(giveup_vsx); EXPORT_SYMBOL(giveup_vsx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment