Commit 9dcbdb7e authored by Andi Kleen's avatar Andi Kleen Committed by Linus Torvalds

[PATCH] Fix incorrect inline assembly in RAID-5

Pure luck that this ever worked at all. The optimized assembly for XOR
in RAID-5 did clobber registers, but declared them as read-only inputs.
I'm pretty sure that at least the 4 disk and possibly the 5 disk cases
did corrupt callee-saved registers. The others probably got away with it because
they were always used in their own functions (and only clobbered caller-saved
registers) and were only called via pointers, preventing inlining.

Some of the replacements are a bit complicated because the functions
exceed gcc's 10 asm argument limit when each input/output register needs
two arguments. Works around that by saving/restoring some of the registers
manually.

I wasn't able to test it in real-life because I don't have a RAID
setup and the RAID code didn't compile since several 2.5 releases.
I wrote some test programs that did test the XOR and they showed
no regression.

Also aligns the XMM save area to 16 bytes to save a few cycles.
parent e00fe306
...@@ -76,9 +76,9 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) ...@@ -76,9 +76,9 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
" addl $128, %2 ;\n" " addl $128, %2 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: : "+r" (lines),
: "r" (lines), "+r" (p1), "+r" (p2)
"r" (p1), "r" (p2) :
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
...@@ -126,9 +126,9 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -126,9 +126,9 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $128, %3 ;\n" " addl $128, %3 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: : "+r" (lines),
: "r" (lines), "+r" (p1), "+r" (p2), "+r" (p3)
"r" (p1), "r" (p2), "r" (p3) :
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
...@@ -181,14 +181,15 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -181,14 +181,15 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $128, %4 ;\n" " addl $128, %4 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: : "+r" (lines),
: "r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
"r" (p1), "r" (p2), "r" (p3), "r" (p4) :
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
} }
static void static void
xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5) unsigned long *p3, unsigned long *p4, unsigned long *p5)
...@@ -198,7 +199,11 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -198,7 +199,11 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
FPU_SAVE; FPU_SAVE;
/* need to save/restore p4/p5 manually otherwise gcc's 10 argument
limit gets exceeded (+ counts as two arguments) */
__asm__ __volatile__ ( __asm__ __volatile__ (
" pushl %4\n"
" pushl %5\n"
#undef BLOCK #undef BLOCK
#define BLOCK(i) \ #define BLOCK(i) \
LD(i,0) \ LD(i,0) \
...@@ -241,9 +246,11 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -241,9 +246,11 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $128, %5 ;\n" " addl $128, %5 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: " popl %5\n"
: "g" (lines), " popl %4\n"
"r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) : "+r" (lines),
"+r" (p1), "+r" (p2), "+r" (p3)
: "r" (p4), "r" (p5)
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
...@@ -297,9 +304,9 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) ...@@ -297,9 +304,9 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
" addl $64, %2 ;\n" " addl $64, %2 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: : "+r" (lines),
: "r" (lines), "+r" (p1), "+r" (p2)
"r" (p1), "r" (p2) :
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
...@@ -355,9 +362,9 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -355,9 +362,9 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $64, %3 ;\n" " addl $64, %3 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: : "+r" (lines),
: "r" (lines), "+r" (p1), "+r" (p2), "+r" (p3)
"r" (p1), "r" (p2), "r" (p3) :
: "memory" ); : "memory" );
FPU_RESTORE; FPU_RESTORE;
...@@ -422,9 +429,9 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -422,9 +429,9 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $64, %4 ;\n" " addl $64, %4 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: : "+r" (lines),
: "r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
"r" (p1), "r" (p2), "r" (p3), "r" (p4) :
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
...@@ -439,7 +446,10 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -439,7 +446,10 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
FPU_SAVE; FPU_SAVE;
/* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
__asm__ __volatile__ ( __asm__ __volatile__ (
" pushl %4\n"
" pushl %5\n"
" .align 32,0x90 ;\n" " .align 32,0x90 ;\n"
" 1: ;\n" " 1: ;\n"
" movq (%1), %%mm0 ;\n" " movq (%1), %%mm0 ;\n"
...@@ -498,9 +508,11 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -498,9 +508,11 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $64, %5 ;\n" " addl $64, %5 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: " popl %5\n"
: "g" (lines), " popl %4\n"
"r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) : "+g" (lines),
"+r" (p1), "+r" (p2), "+r" (p3)
: "r" (p4), "r" (p5)
: "memory"); : "memory");
FPU_RESTORE; FPU_RESTORE;
...@@ -554,6 +566,8 @@ static struct xor_block_template xor_block_p5_mmx = { ...@@ -554,6 +566,8 @@ static struct xor_block_template xor_block_p5_mmx = {
: "r" (cr0), "r" (xmm_save) \ : "r" (cr0), "r" (xmm_save) \
: "memory") : "memory")
#define ALIGN16 __attribute__((aligned(16)))
#define OFFS(x) "16*("#x")" #define OFFS(x) "16*("#x")"
#define PF_OFFS(x) "256+16*("#x")" #define PF_OFFS(x) "256+16*("#x")"
#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
...@@ -575,7 +589,7 @@ static void ...@@ -575,7 +589,7 @@ static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{ {
unsigned long lines = bytes >> 8; unsigned long lines = bytes >> 8;
char xmm_save[16*4]; char xmm_save[16*4] ALIGN16;
int cr0; int cr0;
XMMS_SAVE; XMMS_SAVE;
...@@ -616,9 +630,9 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) ...@@ -616,9 +630,9 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
" addl $256, %2 ;\n" " addl $256, %2 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r" (p2)
: :
: "r" (lines),
"r" (p1), "r" (p2)
: "memory"); : "memory");
XMMS_RESTORE; XMMS_RESTORE;
...@@ -629,7 +643,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -629,7 +643,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3) unsigned long *p3)
{ {
unsigned long lines = bytes >> 8; unsigned long lines = bytes >> 8;
char xmm_save[16*4]; char xmm_save[16*4] ALIGN16;
int cr0; int cr0;
XMMS_SAVE; XMMS_SAVE;
...@@ -677,9 +691,9 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -677,9 +691,9 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $256, %3 ;\n" " addl $256, %3 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r"(p2), "+r"(p3)
: :
: "r" (lines),
"r" (p1), "r"(p2), "r"(p3)
: "memory" ); : "memory" );
XMMS_RESTORE; XMMS_RESTORE;
...@@ -690,7 +704,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -690,7 +704,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4) unsigned long *p3, unsigned long *p4)
{ {
unsigned long lines = bytes >> 8; unsigned long lines = bytes >> 8;
char xmm_save[16*4]; char xmm_save[16*4] ALIGN16;
int cr0; int cr0;
XMMS_SAVE; XMMS_SAVE;
...@@ -745,9 +759,9 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -745,9 +759,9 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $256, %4 ;\n" " addl $256, %4 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: "+r" (lines),
"+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
: :
: "r" (lines),
"r" (p1), "r" (p2), "r" (p3), "r" (p4)
: "memory" ); : "memory" );
XMMS_RESTORE; XMMS_RESTORE;
...@@ -758,12 +772,15 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -758,12 +772,15 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5) unsigned long *p3, unsigned long *p4, unsigned long *p5)
{ {
unsigned long lines = bytes >> 8; unsigned long lines = bytes >> 8;
char xmm_save[16*4]; char xmm_save[16*4] ALIGN16;
int cr0; int cr0;
XMMS_SAVE; XMMS_SAVE;
/* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
__asm__ __volatile__ ( __asm__ __volatile__ (
" pushl %4\n"
" pushl %5\n"
#undef BLOCK #undef BLOCK
#define BLOCK(i) \ #define BLOCK(i) \
PF1(i) \ PF1(i) \
...@@ -820,9 +837,11 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, ...@@ -820,9 +837,11 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
" addl $256, %5 ;\n" " addl $256, %5 ;\n"
" decl %0 ;\n" " decl %0 ;\n"
" jnz 1b ;\n" " jnz 1b ;\n"
: " popl %5\n"
: "r" (lines), " popl %4\n"
"r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5) : "+r" (lines),
"+r" (p1), "+r" (p2), "+r" (p3)
: "r" (p4), "r" (p5)
: "memory"); : "memory");
XMMS_RESTORE; XMMS_RESTORE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment