Commit 44922150 authored by David S. Miller

sparc64: Fix userspace FPU register corruptions.

If we have a series of events from userspace, with %fprs=FPRS_FEF,
as follows:

ETRAP
	ETRAP
		VIS_ENTRY(fprs=0x4)
		VIS_EXIT
		RTRAP (kernel FPU restore with fpu_saved=0x4)
	RTRAP

We will not restore the user registers that were clobbered by the
FPU-using kernel code in the inner-most trap.

Traps allocate FPU save slots in the thread struct, and FPU-using
sequences save only the "dirty" FPU registers.

This works at the initial trap level because all of the registers
get recorded into the top-level FPU save area, and we'll return
to userspace with the FPU disabled so that any FPU use by the user
will take an FPU disabled trap wherein we'll load the registers
back up properly.

But this is not how trap returns from kernel to kernel operate.
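
To make the failure mode concrete, here is a toy C model of the
save-slot scheme (illustrative only, not the kernel code: the FPRS_*
values are the real %fprs bits, but the struct, helper names, and sizes
are invented for the example).  At a nested trap level the dirty-only
save records nothing when %fprs is just FPRS_FEF, so the clobbered user
registers are never put back:

#include <stdio.h>
#include <string.h>

#define FPRS_DL		0x1	/* lower half (%f0-%f31) dirty */
#define FPRS_DU		0x2	/* upper half (%f32-%f62) dirty */
#define FPRS_FEF	0x4	/* FPU enabled */

#define NSLOTS	7
#define NREGS	64

struct toy_thread {
	unsigned char fpsaved[NSLOTS];	/* what each trap-level slot holds */
	double fpregs[NSLOTS][NREGS];	/* saved register images */
	int depth;			/* current trap-nesting level */
};

static double fpu[NREGS];		/* the live FPU register file */

/* Old behaviour: an inner slot records only the currently-dirty halves. */
static void save_dirty_only(struct toy_thread *t, unsigned char fprs)
{
	t->fpsaved[t->depth] = fprs;
	if (fprs & FPRS_DL)
		memcpy(&t->fpregs[t->depth][0], &fpu[0], 32 * sizeof(double));
	if (fprs & FPRS_DU)
		memcpy(&t->fpregs[t->depth][32], &fpu[32], 32 * sizeof(double));
}

/* Trap return restores only what the slot claims to hold. */
static void restore_slot(struct toy_thread *t)
{
	unsigned char saved = t->fpsaved[t->depth];

	if (saved & FPRS_DL)
		memcpy(&fpu[0], &t->fpregs[t->depth][0], 32 * sizeof(double));
	if (saved & FPRS_DU)
		memcpy(&fpu[32], &t->fpregs[t->depth][32], 32 * sizeof(double));
}

int main(void)
{
	struct toy_thread t = { .depth = 1 };	/* inside a nested trap */

	fpu[0] = 1.0;			/* live user value in %f0 */
	save_dirty_only(&t, FPRS_FEF);	/* %fprs = 0x4: no dirty bits, nothing saved */
	fpu[0] = -1.0;			/* kernel FPU code clobbers %f0 anyway */
	restore_slot(&t);		/* nothing to restore either */
	printf("user %%f0 after inner trap: %.1f (should still be 1.0)\n", fpu[0]);
	return 0;
}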

The simplest fix for this bug is to always save all FPU register state
for anything other than the top-most FPU save area.

Getting rid of the optimized inner-slot FPU saving code ends up
making VISEntryHalf degenerate into plain VISEntry.
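
A correspondingly tiny sketch of the fix (again illustrative, not the
kernel code, and reusing the made-up names from the model above): any
save slot other than the top-most one now claims both register-file
halves plus FPRS_FEF, so the kernel-to-kernel trap return writes the
whole register file back.  This mirrors the
"mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5" change in the save-slot code
below.

#include <stdio.h>

#define FPRS_DL		0x1
#define FPRS_DU		0x2
#define FPRS_FEF	0x4

/* Hypothetical helper: what an inner (non-top-most) save slot records. */
static unsigned int inner_slot_mask(void)
{
	/* Claim the full register file so the restore puts it all back. */
	return FPRS_DL | FPRS_DU | FPRS_FEF;
}

int main(void)
{
	/* In the scenario above %fprs was 0x4, so the old code recorded
	 * 0x4 (nothing to restore); the fix records 0x7. */
	printf("old inner slot mask: %#x, fixed: %#x\n",
	       (unsigned int)FPRS_FEF, inner_slot_mask());
	return 0;
}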

Longer term we need to do something smarter to reinstate the partial
save optimizations.  Perhaps the fundamental error is having trap entry
and exit allocate FPU save slots and restore register state.  Instead,
the VISEntry et al. calls should be doing that work.

This bug is about two decades old.
Reported-by: James Y Knight <jyknight@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4469942b
@@ -28,16 +28,10 @@
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf \
-	rd	%fprs, %o5; \
-	andcc	%o5, FPRS_FEF, %g0; \
-	be,pt	%icc, 297f; \
-	 sethi	%hi(298f), %g7; \
-	sethi	%hi(VISenterhalf), %g1; \
-	jmpl	%g1 + %lo(VISenterhalf), %g0; \
-	 or	%g7, %lo(298f), %g7; \
-	clr	%o5; \
-297:	wr	%o5, FPRS_FEF, %fprs; \
-298:
+	VISEntry
+
+#define VISExitHalf \
+	VISExit
 
 #define VISEntryHalfFast(fail_label) \
 	rd	%fprs, %o5; \
@@ -47,7 +41,7 @@
 	ba,a,pt	%xcc, fail_label; \
 297:	wr	%o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf \
+#define VISExitHalfFast \
 	wr	%o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
@@ -240,8 +240,11 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	add	%o0, 0x40, %o0
 	bne,pt	%icc, 1b
 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+	VISExitHalfFast
+#else
 	VISExitHalf
-
+#endif
 	brz,pn	%o2, .Lexit
 	 cmp	%o2, 19
 	ble,pn	%icc, .Lsmall_unaligned
@@ -44,9 +44,8 @@ vis1:	ldub	[%g6 + TI_FPSAVED], %g3
 
 	 stx	%g3, [%g6 + TI_GSR]
 2:	add	%g6, %g1, %g3
-	cmp	%o5, FPRS_DU
-	be,pn	%icc, 6f
-	 sll	%g1, 3, %g1
+	mov	FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+	sll	%g1, 3, %g1
 	stb	%o5, [%g3 + TI_FPSAVED]
 	rd	%gsr, %g2
 	add	%g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1:	ldub	[%g6 + TI_FPSAVED], %g3
 	.align	32
 80:	jmpl	%g7 + %g0, %g0
 	 nop
-6:	ldub	[%g3 + TI_FPSAVED], %o5
-	or	%o5, FPRS_DU, %o5
-	add	%g6, TI_FPREGS+0x80, %g2
-	stb	%o5, [%g3 + TI_FPSAVED]
-	sll	%g1, 5, %g1
-	add	%g6, TI_FPREGS+0xc0, %g3
-	wr	%g0, FPRS_FEF, %fprs
-	membar	#Sync
-	stda	%f32, [%g2 + %g1] ASI_BLK_P
-	stda	%f48, [%g3 + %g1] ASI_BLK_P
-	membar	#Sync
-	ba,pt	%xcc, 80f
-	 nop
-
-	.align	32
-80:	jmpl	%g7 + %g0, %g0
-	 nop
-
-	.align	32
-VISenterhalf:
-	ldub	[%g6 + TI_FPDEPTH], %g1
-	brnz,a,pn	%g1, 1f
-	 cmp	%g1, 1
-	stb	%g0, [%g6 + TI_FPSAVED]
-	stx	%fsr, [%g6 + TI_XFSR]
-	clr	%o5
-	jmpl	%g7 + %g0, %g0
-	 wr	%g0, FPRS_FEF, %fprs
-
-1:	bne,pn	%icc, 2f
-	 srl	%g1, 1, %g1
-	ba,pt	%xcc, vis1
-	 sub	%g7, 8, %g7
-2:	addcc	%g6, %g1, %g3
-	sll	%g1, 3, %g1
-	andn	%o5, FPRS_DU, %g2
-	stb	%g2, [%g3 + TI_FPSAVED]
-	rd	%gsr, %g2
-	add	%g6, %g1, %g3
-	stx	%g2, [%g3 + TI_GSR]
-	add	%g6, %g1, %g2
-	stx	%fsr, [%g2 + TI_XFSR]
-	sll	%g1, 5, %g1
-3:	andcc	%o5, FPRS_DL, %g0
-	be,pn	%icc, 4f
-	 add	%g6, TI_FPREGS, %g2
-	add	%g6, TI_FPREGS+0x40, %g3
-	membar	#Sync
-	stda	%f0, [%g2 + %g1] ASI_BLK_P
-	stda	%f16, [%g3 + %g1] ASI_BLK_P
-	membar	#Sync
-	ba,pt	%xcc, 4f
-	 nop
-
-	.align	32
-4:	and	%o5, FPRS_DU, %o5
-	jmpl	%g7 + %g0, %g0
-	 wr	%o5, FPRS_FEF, %fprs
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
 		unsigned long *);