Commit bf6a4d5b authored by Cyril Bur, committed by Michael Ellerman

powerpc: Add the ability to save VSX without giving it up

This patch adds the ability to save the VSX registers to the thread struct
without giving up (disabling) the facility, so the process still has VSX
enabled the next time it returns to userspace.

This patch builds on a previous optimisation for the FPU and VEC registers
in the thread copy path to avoid a possibly pointless reload of VSX state.
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 6f515d84
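At a glance, the patch splits the old giveup_vsx() into a pure save step and a
separate give-up step. The sketch below is illustrative only and not part of
the patch; copy_thread_snapshot() is a hypothetical caller, while save_all(),
save_vsx() and the thread struct fields are the ones touched in the diff:

/*
 * Illustrative sketch, not part of the patch: what a thread-copy
 * caller gains from saving without giving up.
 */
static void copy_thread_snapshot(struct task_struct *src)
{
	/*
	 * Old behaviour: flushing VSX state ended in giveup_vsx(),
	 * which clears MSR_VSX; src would then take a facility
	 * unavailable exception and reload its state on the next
	 * return to userspace.
	 *
	 * New behaviour: save_all() reaches save_vsx(), which copies
	 * the live registers into src->thread.fp_state / .vr_state
	 * but leaves MSR_VSX set, so src resumes with no fault and
	 * no reload.
	 */
	save_all(src);
}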
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -56,14 +56,10 @@ static inline void __giveup_altivec(struct task_struct *t) { }
 #ifdef CONFIG_VSX
 extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void __giveup_vsx(struct task_struct *);
 static inline void disable_kernel_vsx(void)
 {
 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-#else
-static inline void __giveup_vsx(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_SPE
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -28,10 +28,6 @@ EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
 
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL_GPL(__giveup_vsx);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -280,19 +280,31 @@ static inline int restore_altivec(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void giveup_vsx(struct task_struct *tsk)
+static void __giveup_vsx(struct task_struct *tsk)
 {
-	check_if_tm_restore_required(tsk);
-
-	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 	if (tsk->thread.regs->msr & MSR_FP)
 		__giveup_fpu(tsk);
 	if (tsk->thread.regs->msr & MSR_VEC)
 		__giveup_altivec(tsk);
+	tsk->thread.regs->msr &= ~MSR_VSX;
+}
+
+static void giveup_vsx(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 	__giveup_vsx(tsk);
 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(giveup_vsx);
+
+static void save_vsx(struct task_struct *tsk)
+{
+	if (tsk->thread.regs->msr & MSR_FP)
+		save_fpu(tsk);
+	if (tsk->thread.regs->msr & MSR_VEC)
+		save_altivec(tsk);
+}
 
 void enable_kernel_vsx(void)
 {
@@ -335,6 +347,7 @@ static int restore_vsx(struct task_struct *tsk)
 }
 #else
 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
+static inline void save_vsx(struct task_struct *tsk) { }
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -478,14 +491,19 @@ void save_all(struct task_struct *tsk)
 
 	msr_check_and_set(msr_all_available);
 
-	if (usermsr & MSR_FP)
-		save_fpu(tsk);
-
-	if (usermsr & MSR_VEC)
-		save_altivec(tsk);
-
-	if (usermsr & MSR_VSX)
-		__giveup_vsx(tsk);
+	/*
+	 * Saving the way the register space is in hardware, save_vsx boils
+	 * down to a save_fpu() and save_altivec()
+	 */
+	if (usermsr & MSR_VSX) {
+		save_vsx(tsk);
+	} else {
+		if (usermsr & MSR_FP)
+			save_fpu(tsk);
+
+		if (usermsr & MSR_VEC)
+			save_altivec(tsk);
+	}
 
 	if (usermsr & MSR_SPE)
 		__giveup_spe(tsk);
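The comment added in save_all() leans on how the POWER ISA lays out the
register files; a recap (mine, not the patch's), in comment form:

/*
 * Why save_vsx() boils down to save_fpu() + save_altivec():
 *
 *   VSR[ 0..31], doubleword 0 == FPR[0..31]  (the FP registers)
 *   VSR[32..63]               == VR[0..31]   (the VMX registers)
 *
 * The VSX facility defines no registers of its own; it addresses the
 * combined 64-entry file. save_fpu() stores full 128-bit VSRs when the
 * thread is a VSX user, so the two stores together capture the whole
 * VSX state.
 */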
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -151,23 +151,6 @@ _GLOBAL(load_up_vsx)
 	std	r12,_MSR(r1)
 	b	fast_exception_return
 
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- */
-_GLOBAL(__giveup_vsx)
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	ld	r5,PT_REGS(r3)
-	cmpdi	0,r5,0
-	beq	1f
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r3,MSR_VSX@h
-	andc	r4,r4,r3		/* disable VSX for previous task */
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-	blr
-
 #endif /* CONFIG_VSX */
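For context, the thread-copy path mentioned in the commit message reaches
save_all() through flush_all_to_thread(). A reconstruction of that caller as
this series leaves it (from memory of the surrounding series, so treat the
details as unverified):

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();	/* state must not migrate mid-save */
		BUG_ON(tsk != current);
		save_all(tsk);		/* save FP/VEC/VSX, keep them enabled */

#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

		preempt_enable();
	}
}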