Commit db091d18 authored by David S. Miller

[SPARC64]: On broken cheetah, enable p-cache around large copies.

parent 70189e27
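
The change is the same in each copy routine: on Cheetah parts where the P-cache is kept disabled as an erratum workaround, the bulk-copy loop briefly enables the P-cache (saving the old DCU control value in %g3), then flushes and disables it again once the main loop finishes. At boot, cheetah_patch_copyops checks DCU_PE and, if the P-cache is already enabled globally, NOPs those sequences out. A rough C sketch of that boot-time decision follows; read_dcu_control_reg(), nop_out() and struct pcache_site are illustrative assumptions, not kernel APIs.

/*
 * Hedged sketch (plain C, not kernel code) of the decision made by
 * cheetah_patch_copyops in this patch.  Helper names are illustrative.
 */
#include <stdint.h>

#define DCU_PE_BIT	(1ULL << 45)	/* P-cache enable; exact bit position per asm/dcu.h */

extern uint64_t read_dcu_control_reg(void);	/* models ldxa [%g0] ASI_DCU_CONTROL_REG */
extern void nop_out(void *insns, int count);	/* models the stw-NOP + flush patching */

struct pcache_site {
	void *enable_seq;	/* the 6-instruction "*_nop_1_6" block */
	void *disable_seq;	/* the 3-instruction "*_nop_2_3" block */
};

static void patch_pcache_sites(struct pcache_site *sites, int nr_sites)
{
	/*
	 * DCU_PE clear means the chip runs with the P-cache disabled (the
	 * "broken cheetah" workaround), so keep the per-copy enable/disable
	 * code.  DCU_PE set means the P-cache is already on, so the toggling
	 * is pure overhead and gets replaced with NOPs.
	 */
	if (!(read_dcu_control_reg() & DCU_PE_BIT))
		return;

	for (int i = 0; i < nr_sites; i++) {
		nop_out(sites[i].enable_seq, 6);
		nop_out(sites[i].disable_seq, 3);
	}
}
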
@@ -7,6 +7,8 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
@@ -181,6 +183,16 @@ U3copy_from_user_enter:
.align 64
U3copy_from_user_begin:
#ifdef __KERNEL__
.globl U3copy_from_user_nop_1_6
U3copy_from_user_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
#endif
prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
andn %o2, (0x40 - 1), %o4 ! A0
@@ -321,6 +333,13 @@ U3copy_from_user_loopfini:
stda %f16, [%o0] ASI_BLK_P ! MS
add %o0, 0x40, %o0 ! A0
add %o1, 0x40, %o1 ! A1
#ifdef __KERNEL__
.globl U3copy_from_user_nop_2_3
U3copy_from_user_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
#endif
membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
@@ -7,6 +7,8 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#undef SMALL_COPY_USES_FPU
#define EXNV(x,y,a,b) \
98: x,y; \
@@ -228,6 +230,16 @@ U3copy_to_user_enter:
.align 64
U3copy_to_user_begin:
#ifdef __KERNEL__
.globl U3copy_to_user_nop_1_6
U3copy_to_user_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
#endif
prefetch [%o1 + 0x000], #one_read ! MS Group1
prefetch [%o1 + 0x040], #one_read ! MS Group2
andn %o2, (0x40 - 1), %o4 ! A0
@@ -368,6 +380,13 @@ U3copy_to_user_loopfini:
EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
add %o0, 0x40, %o0 ! A0
add %o1, 0x40, %o1 ! A1
#ifdef __KERNEL__
.globl U3copy_to_user_nop_2_3
U3copy_to_user_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
#endif
membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
@@ -7,6 +7,8 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#undef SMALL_COPY_USES_FPU
#else
#define ASI_BLK_P 0xf0
@@ -109,6 +111,16 @@ U3memcpy_enter:
.align 64
U3memcpy_begin:
#ifdef __KERNEL__
.globl U3memcpy_nop_1_6
U3memcpy_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
#endif
prefetch [%o1 + 0x000], #one_read ! MS Group1
prefetch [%o1 + 0x040], #one_read ! MS Group2
andn %o2, (0x40 - 1), %o4 ! A0
@@ -249,6 +261,13 @@ U3memcpy_loopfini:
stda %f16, [%o0] ASI_BLK_P ! MS
add %o0, 0x40, %o0 ! A0
add %o1, 0x40, %o1 ! A1
#ifdef __KERNEL__
.globl U3memcpy_nop_2_3
U3memcpy_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
#endif
membar #Sync ! MS Group26 (7-cycle stall)
/* Now we copy the (len modulo 64) bytes at the end.
@@ -368,6 +368,28 @@ bcopy: or %o0, 0, %g3 ! IEU0 Group
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
#define ULTRA3_PCACHE_DO_NOP(symbol) \
sethi %hi(symbol##_nop_1_6), %g1; \
or %g1, %lo(symbol##_nop_1_6), %g1; \
sethi %hi(NOP), %g2; \
stw %g2, [%g1 + 0x00]; \
stw %g2, [%g1 + 0x04]; \
flush %g1 + 0x00; \
stw %g2, [%g1 + 0x08]; \
stw %g2, [%g1 + 0x0c]; \
flush %g1 + 0x08; \
stw %g2, [%g1 + 0x10]; \
stw %g2, [%g1 + 0x14]; \
flush %g1 + 0x10; \
sethi %hi(symbol##_nop_2_3), %g1; \
or %g1, %lo(symbol##_nop_2_3), %g1; \
stw %g2, [%g1 + 0x00]; \
stw %g2, [%g1 + 0x04]; \
flush %g1 + 0x00; \
stw %g2, [%g1 + 0x08]; \
flush %g1 + 0x08;
#include <asm/dcu.h>
.globl cheetah_patch_copyops
cheetah_patch_copyops:
@@ -375,6 +397,17 @@ cheetah_patch_copyops:
ULTRA3_DO_PATCH(__copy_from_user, U3copy_from_user)
ULTRA3_DO_PATCH(__copy_to_user, U3copy_to_user)
ULTRA3_DO_PATCH(__copy_in_user, U3copy_in_user)
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o3
sllx %o3, 32, %o3
andcc %g3, %o3, %g0
be,pn %xcc, pcache_disabled
nop
ULTRA3_PCACHE_DO_NOP(U3memcpy)
ULTRA3_PCACHE_DO_NOP(U3copy_from_user)
ULTRA3_PCACHE_DO_NOP(U3copy_to_user)
ULTRA3_PCACHE_DO_NOP(cheetah_copy_user_page)
pcache_disabled:
retl
nop
#undef BRANCH_ALWAYS
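
For reference, ULTRA3_PCACHE_DO_NOP above patches code at runtime: it stores the SPARC NOP encoding (sethi %hi(0), %g0, i.e. 0x01000000) over the six-instruction enable block and the three-instruction disable block, issuing a flush after each pair of stores so the I-cache picks up the new words. A small C model of that pattern; patch_word() and icache_flush() are stand-in names, not kernel APIs.

/*
 * Hedged C model of ULTRA3_PCACHE_DO_NOP for one copy routine.
 * The stw/stw/flush rhythm of the macro is mirrored by the loop below.
 */
#include <stdint.h>

#define SPARC_NOP	0x01000000u	/* sethi %hi(0), %g0 */

extern void patch_word(uint32_t *insn, uint32_t val);	/* models stw */
extern void icache_flush(void *addr);			/* models flush */

static void nop_out_range(uint32_t *insns, int count)
{
	for (int i = 0; i < count; i++) {
		patch_word(&insns[i], SPARC_NOP);
		/* flush after every second word, and after the final one */
		if ((i & 1) || i == count - 1)
			icache_flush(&insns[i & ~1]);
	}
}

/* Usage sketch: NOP out U3memcpy's enable (6 insns) and disable (3 insns) blocks. */
extern uint32_t U3memcpy_nop_1_6[], U3memcpy_nop_2_3[];

static void patch_u3memcpy(void)
{
	nop_out_range(U3memcpy_nop_1_6, 6);
	nop_out_range(U3memcpy_nop_2_3, 3);
}
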
@@ -9,6 +9,8 @@
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>
#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
@@ -111,6 +113,15 @@ cheetah_patch_1:
nop
cheetah_copy_user_page:
.globl cheetah_copy_user_page_nop_1_6
cheetah_copy_user_page_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
sethi %uhi(DCU_PE), %o2
sllx %o2, 32, %o2
or %g3, %o2, %o2
stxa %o2, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
sethi %hi((PAGE_SIZE/64)-7), %o2 ! A0 Group
prefetch [%o1 + 0x000], #one_read ! MS
or %o2, %lo((PAGE_SIZE/64)-7), %o2 ! A1 Group
@@ -202,6 +213,11 @@ cheetah_copy_user_page:
fmovd %f12, %f44 ! FGA Group
fmovd %f14, %f46 ! FGA Group
stda %f32, [%o0] ASI_BLK_P ! MS
.globl cheetah_copy_user_page_nop_2_3
cheetah_copy_user_page_nop_2_3:
mov PRIMARY_CONTEXT, %o2
stxa %g0, [%o2] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
ba,a,pt %xcc, copy_user_page_continue
spitfire_copy_user_page: