Commit 2c76397b authored by Ingo Molnar

x86: Clean up csum-copy_64.S a bit

The many instances of stray whitespace and other untidiness made this
code almost unreadable to me - so fix those.

No changes to the code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0d2eb44f
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
@@ -31,58 +31,58 @@
        .macro source
10:
        .section __ex_table, "a"
        .align 8
        .quad 10b, .Lbad_source
        .previous
        .endm

        .macro dest
20:
        .section __ex_table, "a"
        .align 8
        .quad 20b, .Lbad_dest
        .previous
        .endm

        .macro ignore L=.Lignore
30:
        .section __ex_table, "a"
        .align 8
        .quad 30b, \L
        .previous
        .endm

ENTRY(csum_partial_copy_generic)
        CFI_STARTPROC
        cmpl $3*64, %edx
        jle .Lignore

.Lignore:
        subq $7*8, %rsp
        CFI_ADJUST_CFA_OFFSET 7*8
        movq %rbx, 2*8(%rsp)
        CFI_REL_OFFSET rbx, 2*8
        movq %r12, 3*8(%rsp)
        CFI_REL_OFFSET r12, 3*8
        movq %r14, 4*8(%rsp)
        CFI_REL_OFFSET r14, 4*8
        movq %r13, 5*8(%rsp)
        CFI_REL_OFFSET r13, 5*8
        movq %rbp, 6*8(%rsp)
        CFI_REL_OFFSET rbp, 6*8

        movq %r8, (%rsp)
        movq %r9, 1*8(%rsp)
        movl %ecx, %eax
        movl %edx, %ecx
        xorl %r9d, %r9d
        movq %rcx, %r12

        shrq $6, %r12
        jz .Lhandle_tail /* < 64 */

        clc
@@ -94,156 +94,156 @@ ENTRY(csum_partial_copy_generic)
        .p2align 4
.Lloop:
        source
        movq (%rdi), %rbx
        source
        movq 8(%rdi), %r8
        source
        movq 16(%rdi), %r11
        source
        movq 24(%rdi), %rdx
        source
        movq 32(%rdi), %r10
        source
        movq 40(%rdi), %rbp
        source
        movq 48(%rdi), %r14
        source
        movq 56(%rdi), %r13

        ignore 2f
        prefetcht0 5*64(%rdi)
2:
        adcq %rbx, %rax
        adcq %r8, %rax
        adcq %r11, %rax
        adcq %rdx, %rax
        adcq %r10, %rax
        adcq %rbp, %rax
        adcq %r14, %rax
        adcq %r13, %rax

        decl %r12d

        dest
        movq %rbx, (%rsi)
        dest
        movq %r8, 8(%rsi)
        dest
        movq %r11, 16(%rsi)
        dest
        movq %rdx, 24(%rsi)
        dest
        movq %r10, 32(%rsi)
        dest
        movq %rbp, 40(%rsi)
        dest
        movq %r14, 48(%rsi)
        dest
        movq %r13, 56(%rsi)

3:
        leaq 64(%rdi), %rdi
        leaq 64(%rsi), %rsi
        jnz .Lloop

        adcq %r9, %rax

        /* do last up to 56 bytes */
.Lhandle_tail:
        /* ecx: count */
        movl %ecx, %r10d
        andl $63, %ecx
        shrl $3, %ecx
        jz .Lfold
        clc
        .p2align 4
.Lloop_8:
        source
        movq (%rdi), %rbx
        adcq %rbx, %rax
        decl %ecx
        dest
        movq %rbx, (%rsi)
        leaq 8(%rsi), %rsi /* preserve carry */
        leaq 8(%rdi), %rdi
        jnz .Lloop_8
        adcq %r9, %rax /* add in carry */

.Lfold:
        /* reduce checksum to 32bits */
        movl %eax, %ebx
        shrq $32, %rax
        addl %ebx, %eax
        adcl %r9d, %eax

        /* do last up to 6 bytes */
.Lhandle_7:
        movl %r10d, %ecx
        andl $7, %ecx
        shrl $1, %ecx
        jz .Lhandle_1
        movl $2, %edx
        xorl %ebx, %ebx
        clc
        .p2align 4
.Lloop_1:
        source
        movw (%rdi), %bx
        adcl %ebx, %eax
        decl %ecx
        dest
        movw %bx, (%rsi)
        leaq 2(%rdi), %rdi
        leaq 2(%rsi), %rsi
        jnz .Lloop_1
        adcl %r9d, %eax /* add in carry */

        /* handle last odd byte */
.Lhandle_1:
        testl $1, %r10d
        jz .Lende
        xorl %ebx, %ebx
        source
        movb (%rdi), %bl
        dest
        movb %bl, (%rsi)
        addl %ebx, %eax
        adcl %r9d, %eax /* carry */

        CFI_REMEMBER_STATE
.Lende:
        movq 2*8(%rsp), %rbx
        CFI_RESTORE rbx
        movq 3*8(%rsp), %r12
        CFI_RESTORE r12
        movq 4*8(%rsp), %r14
        CFI_RESTORE r14
        movq 5*8(%rsp), %r13
        CFI_RESTORE r13
        movq 6*8(%rsp), %rbp
        CFI_RESTORE rbp
        addq $7*8, %rsp
        CFI_ADJUST_CFA_OFFSET -7*8
        ret
        CFI_RESTORE_STATE

        /* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source:
        movq (%rsp), %rax
        testq %rax, %rax
        jz .Lende
        movl $-EFAULT, (%rax)
        jmp .Lende

.Lbad_dest:
        movq 8(%rsp), %rax
        testq %rax, %rax
        jz .Lende
        movl $-EFAULT, (%rax)
        jmp .Lende
        CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)
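For readers less familiar with this code: the .Lfold block ("reduce checksum to 32bits") takes the 64-bit running sum that the adcq chains accumulate in %rax and folds it into 32 bits by adding the high half to the low half and then adding the resulting carry back in (%r9d is kept at zero throughout, so adcl %r9d, %eax contributes only the carry flag). A minimal C sketch of that folding step follows; the helper name and the standalone test program are illustrative only, not part of the kernel sources.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper (not from the kernel) mirroring the .Lfold
 * sequence: fold a 64-bit running sum into 32 bits by adding the two
 * 32-bit halves and then adding the carry back in ("end-around carry"),
 * just like the addl/adcl pair in the assembly with %r9d held at zero.
 */
static uint32_t fold_64_to_32(uint64_t sum)
{
        uint32_t lo = (uint32_t)sum;                /* movl %eax, %ebx */
        uint32_t hi = (uint32_t)(sum >> 32);        /* shrq $32, %rax  */
        uint64_t t  = (uint64_t)lo + hi;            /* addl %ebx, %eax */
        return (uint32_t)t + (uint32_t)(t >> 32);   /* adcl %r9d, %eax */
}

int main(void)
{
        /* Example: halves whose sum overflows 32 bits. */
        uint64_t sum = 0xffffffff00000002ULL;
        printf("folded: 0x%08x\n", fold_64_to_32(sum));  /* prints 0x00000002 */
        return 0;
}

As the exception-handler comment notes, zeroing the destination on a fault is left to the C wrappers; the assembly only stores -EFAULT through the saved error pointer when one was supplied.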