Commit 5feed8ed authored by David S. Miller

[SPARC64]: Remove memcpy Ultra3 PCACHE patching trick.

It could never be enabled safely, so just toss
the code.
Signed-off-by: David S. Miller <davem@redhat.com>
parent 6d5a0e91
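
For context, the trick being removed worked by boot-time code patching: cheetah_patch_copyops overwrote the P-cache enable/disable instruction windows inside the copy routines with NOPs (the ULTRA3_PCACHE_DO_NOP macro in the last hunk below). The following is a minimal C-level sketch of that patching idiom, assuming a hypothetical nop_out_insns() helper, the standard SPARC NOP encoding 0x01000000, and GCC inline asm for the flush; it is illustrative only and not code from this commit or the kernel tree.

	#include <stdint.h>

	/*
	 * Illustrative sketch: overwrite a run of instruction slots with the
	 * SPARC NOP encoding, flushing each modified word so instruction fetch
	 * sees the new contents.  This is the same idiom ULTRA3_PCACHE_DO_NOP
	 * expresses in assembly (stw of a register holding NOP, then flush).
	 */
	#define SPARC_NOP	0x01000000u	/* sethi %hi(0), %g0 */

	static void nop_out_insns(uint32_t *insn, unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			insn[i] = SPARC_NOP;
	#if defined(__sparc__)
			/* flush makes the overwritten word visible to the I-cache. */
			__asm__ __volatile__("flush %0" : : "r" (&insn[i]) : "memory");
	#endif
		}
	}

In the removed code, the first patched window (symbol##_nop_1_6) covers the sequence that set DCU_PE in the D-cache unit control register before the copy loop, and the second (symbol##_nop_2_3) covers the flush/disable sequence after it.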
@@ -185,16 +185,7 @@ enter:
.align 64
begin:
.globl U3copy_from_user_nop_1_6
U3copy_from_user_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
membar #StoreLoad | #StoreStore | #LoadStore
prefetcha [%o1 + 0x000] %asi, #one_read
prefetcha [%o1 + 0x040] %asi, #one_read
andn %o2, (0x40 - 1), %o4
@@ -280,12 +271,6 @@ loopfini:
add %o0, 0x40, %o0
add %o1, 0x40, %o1
.globl U3copy_from_user_nop_2_3
U3copy_from_user_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
......
@@ -204,16 +204,7 @@ enter:
.align 64
begin:
.globl U3copy_to_user_nop_1_6
U3copy_to_user_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
membar #StoreLoad | #StoreStore | #LoadStore
prefetch [%o1 + 0x000], #one_read
prefetch [%o1 + 0x040], #one_read
andn %o2, (0x40 - 1), %o4
@@ -299,12 +290,6 @@ loopfini:
add %o0, 0x40, %o0
add %o1, 0x40, %o1
.globl U3copy_to_user_nop_2_3
U3copy_to_user_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
......
@@ -90,16 +90,7 @@ enter:
.align 64
begin:
#ifdef __KERNEL__
.globl U3memcpy_nop_1_6
U3memcpy_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
#endif
membar #StoreLoad | #StoreStore | #LoadStore
prefetch [%o1 + 0x000], #one_read
prefetch [%o1 + 0x040], #one_read
andn %o2, (0x40 - 1), %o4
@@ -184,13 +175,6 @@ loopfini:
stda %f16, [%o0] ASI_BLK_P
add %o0, 0x40, %o0
add %o1, 0x40, %o1
#ifdef __KERNEL__
.globl U3memcpy_nop_2_3
U3memcpy_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
#endif
membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
......
@@ -360,28 +360,6 @@ bcopy: or %o0, 0, %g3 ! IEU0 Group
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
#define ULTRA3_PCACHE_DO_NOP(symbol) \
sethi %hi(symbol##_nop_1_6), %g1; \
or %g1, %lo(symbol##_nop_1_6), %g1; \
sethi %hi(NOP), %g2; \
stw %g2, [%g1 + 0x00]; \
stw %g2, [%g1 + 0x04]; \
flush %g1 + 0x00; \
stw %g2, [%g1 + 0x08]; \
stw %g2, [%g1 + 0x0c]; \
flush %g1 + 0x08; \
stw %g2, [%g1 + 0x10]; \
stw %g2, [%g1 + 0x04]; \
flush %g1 + 0x10; \
sethi %hi(symbol##_nop_2_3), %g1; \
or %g1, %lo(symbol##_nop_2_3), %g1; \
stw %g2, [%g1 + 0x00]; \
stw %g2, [%g1 + 0x04]; \
flush %g1 + 0x00; \
stw %g2, [%g1 + 0x08]; \
flush %g1 + 0x08;
#include <asm/dcu.h>
.globl cheetah_patch_copyops
cheetah_patch_copyops:
@@ -389,22 +367,6 @@ cheetah_patch_copyops:
ULTRA3_DO_PATCH(__copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(__copy_to_user, U3copy_to_user)
ULTRA3_DO_PATCH(__copy_in_user, U3copy_in_user)
#if 0 /* Causes data corruption, nop out the optimization
* for now -DaveM
*/
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
andcc %g3, %o3, %g0
be,pn %xcc, pcache_disabled
nop
#endif
ULTRA3_PCACHE_DO_NOP(U3memcpy)
ULTRA3_PCACHE_DO_NOP(U3copy_from_user)
ULTRA3_PCACHE_DO_NOP(U3copy_to_user)
#if 0
pcache_disabled:
#endif
retl
nop
#undef BRANCH_ALWAYS
......