Commit 8aac8fa6 authored by Linus Torvalds's avatar Linus Torvalds

Import 2.1.37pre4

parent 6258f70d
......@@ -11,6 +11,6 @@ else
endif
L_TARGET = lib.a
L_OBJS = checksum.o checksumcopy.o semaphore.o locks.o
L_OBJS = checksum.o semaphore.o locks.o
include $(TOPDIR)/Rules.make
......@@ -11,25 +11,21 @@
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
* handling.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/uaccess.h>
#include <net/checksum.h>
/*
* Computes a partial checksum
*
* mostly used for IP header checksumming. Thus we define the following
* fastpath: len==20, buff is 4 byte aligned.
*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
unsigned int csum_partial_bug=0;
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) {
/*
* Experiments with ethernet and slip connections show that buff
......@@ -38,9 +34,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
__asm__("
testl $2, %%esi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %%ecx # Alignment uses up two bytes.
......@@ -98,11 +92,155 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
adcl $0, %%eax
7: "
: "=a"(sum)
: "0"(sum), "c"(len), "S"(buff), "m"(csum_partial_bug)
: "0"(sum), "c"(len), "S"(buff)
: "bx", "cx", "dx", "si");
return(sum);
}
/*
* Copy from ds while checksumming, otherwise like csum_partial
*
* The macros SRC and DST specify the type of access for the instruction.
* thus we can call a custom exception handler for all access types.
*
* FIXME: could someone double check whether I haven't mixed up some SRC and
* DST definitions? It's damn hard to trigger all cases; I hope I got
* them all but there's no guarantee ...
*/
/*
 * SRC()/DST() wrap a single instruction that touches the source or
 * destination buffer.  Each expansion places the instruction under the
 * local label 9999 and emits an __ex_table entry mapping that address to
 * src_access_fault / dst_access_fault, so a fault during the access
 * jumps into the fixup code at the end of the asm body below.
 */
#define SRC(y...) \
" 9999: "#y"; \n \
.section __ex_table, \"a\"; \n \
.long 9999b, src_access_fault \n \
.previous"
/* Same idea for destination writes: faults route to dst_access_fault. */
#define DST(y...) \
" 9999: "#y"; \n \
.section __ex_table, \"a\"; \n \
.long 9999b, dst_access_fault \n \
.previous"
/*
 * csum_partial_copy_generic - copy len bytes from src to dst while
 * folding them into a 32-bit ones-complement checksum.
 *
 * src:         source buffer (reads are covered by SRC() fixup entries)
 * dst:         destination buffer (writes are covered by DST() entries)
 * len:         number of bytes to copy and checksum
 * sum:         initial checksum value to accumulate into
 * src_err_ptr: on a faulting read, the fixup code stores -EFAULT here
 * dst_err_ptr: likewise for a faulting write
 *
 * Returns the accumulated checksum (kept in %eax throughout).
 *
 * Structure of the asm body: an optional 2-byte head to word-align dst,
 * a loop unrolled to 32 bytes per iteration, then the remaining 4-byte
 * words, then a 1-3 byte tail.  An "adcl $0, %eax" after each stage
 * folds the final carry back into the sum.
 */
unsigned int csum_partial_copy_generic (const char *src, char *dst,
int len, int sum, int *src_err_ptr, int *dst_err_ptr)
{
/*
 * NOTE(review): on a fault the handler loads the relevant error
 * pointer, stores -EFAULT (operand %7) through it, and resumes at
 * end_of_body; zeroing the rest of the destination buffer is still
 * marked FIXME inside the fixup section below.
 */
__asm__ __volatile__ ( "
testl $2, %%edi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %%ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %%ecx # ecx was < 2. Deal with it.
jmp 4f
"SRC( 1: movw (%%esi), %%bx )"
addl $2, %%esi
"DST( movw %%bx, (%%edi) )"
addl $2, %%edi
addw %%bx, %%ax
adcl $0, %%eax
2:
pushl %%ecx
shrl $5, %%ecx
jz 2f
testl %%esi, %%esi
"SRC( 1: movl (%%esi), %%ebx )"
"SRC( movl 4(%%esi), %%edx )"
adcl %%ebx, %%eax
"DST( movl %%ebx, (%%edi) )"
adcl %%edx, %%eax
"DST( movl %%edx, 4(%%edi) )"
"SRC( movl 8(%%esi), %%ebx )"
"SRC( movl 12(%%esi), %%edx )"
adcl %%ebx, %%eax
"DST( movl %%ebx, 8(%%edi) )"
adcl %%edx, %%eax
"DST( movl %%edx, 12(%%edi) )"
"SRC( movl 16(%%esi), %%ebx )"
"SRC( movl 20(%%esi), %%edx )"
adcl %%ebx, %%eax
"DST( movl %%ebx, 16(%%edi) )"
adcl %%edx, %%eax
"DST( movl %%edx, 20(%%edi) )"
"SRC( movl 24(%%esi), %%ebx )"
"SRC( movl 28(%%esi), %%edx )"
adcl %%ebx, %%eax
"DST( movl %%ebx, 24(%%edi) )"
adcl %%edx, %%eax
"DST( movl %%edx, 28(%%edi) )"
"SRC( lea 32(%%esi), %%esi )"
"DST( lea 32(%%edi), %%edi )"
dec %%ecx
jne 1b
adcl $0, %%eax
2: popl %%edx
movl %%edx, %%ecx
andl $0x1c, %%edx
je 4f
shrl $2, %%edx # This clears CF
"SRC( 3: movl (%%esi), %%ebx )"
adcl %%ebx, %%eax
"DST( movl %%ebx, (%%edi) )"
"SRC( lea 4(%%esi), %%esi )"
"DST( lea 4(%%edi), %%edi )"
dec %%edx
jne 3b
adcl $0, %%eax
4: andl $3, %%ecx
jz 7f
cmpl $2, %%ecx
jb 5f
"SRC( movw (%%esi), %%cx )"
"SRC( leal 2(%%esi), %%esi )"
"DST( movw %%cx, (%%edi) )"
"DST( leal 2(%%edi), %%edi )"
je 6f
shll $16,%%ecx
"SRC( 5: movb (%%esi), %%cl )"
"DST( movb %%cl, (%%edi) )"
6: addl %%ecx, %%eax
adcl $0, %%eax
7:
end_of_body:
# Exception handler:
################################################
#
.section .fixup, \"a\" #
#
common_fixup: #
#
movl %7, (%%ebx) #
#
# FIXME: do zeroing of rest of the buffer here. #
#
jmp end_of_body #
#
src_access_fault: #
movl %1, %%ebx #
jmp common_fixup #
#
dst_access_fault: #
movl %2, %%ebx #
jmp common_fixup #
#
.previous #
#
################################################
"
: "=a" (sum), "=m" (src_err_ptr), "=m" (dst_err_ptr) /* checksum out in %eax */
: "0" (sum), "c" (len), "S" (src), "D" (dst),
"i" (-EFAULT) /* %7: error code stored by common_fixup */
: "bx", "cx", "dx", "si", "di" );
return(sum);
}
#undef SRC
#undef DST
/*
* FIXME: old compatibility stuff, will be removed soon.
......@@ -112,14 +250,12 @@ unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum)
{
int src_err=0, dst_err=0;
if (!access_ok(VERIFY_READ, src, len))
src = NULL;
sum = __csum_partial_copy_i386_generic ( src, dst, len, sum, &src_err, &dst_err);
sum = csum_partial_copy_generic ( src, dst, len, sum, &src_err, &dst_err);
if (src_err || dst_err)
printk("old csum_partial_copy() exception, tell mingo to convert me.\n");
printk("old csum_partial_copy_fromuser(), tell mingo to convert me.\n");
return sum;
}
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* Changes: Ingo Molnar, converted to 2.1 exception handling
* Istvan Marosi, fixed lots of bugs in the converted
* assembly code.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#define ASSEMBLY
#include <asm/smp.h>
#include <asm/errno.h>
/*
* Copy from ds while checksumming, otherwise like csum_partial
*
* ok, from now on better close your eyes. This file is a top candidate for
* the 'ugliest place in LinuxLand' award.
*
* The macros SRC_* and DST_* specify the type of access for the instruction.
* thus we can call a custom exception handler for all access types.
*
* Damn damn damn. We mess up the stack frame thus we have to have exception
* handlers for all these cases:
*
* 1) source fault + clean stack
* 2) source fault + 32 bits pushed to the stack
* 3) destination fault + clean stack
* 4) destination fault + 32 bits pushed to stack
*
* {the _S postfix below means 'clean up the stack'}
*/
/*
 * SRC()/DST() wrap one instruction that accesses the source or
 * destination buffer and emit an __ex_table entry routing a fault in it
 * to the matching handler at the bottom of this file.  The _S ("clean
 * up the stack") variants are for instructions inside the unrolled
 * loop, where the saved %ecx is still on the stack and the handler
 * must pop it before running the common fixup.
 */
#define SRC(x...) \
9999: x; \
.section __ex_table, "a"; \
.long 9999b, src_access_fault; \
.previous
#define SRC_S(x...) \
9999: x; \
.section __ex_table, "a"; \
.long 9999b, src_access_fault_stack; \
.previous
#define DST(x...) \
9999: x; \
.section __ex_table, "a"; \
.long 9999b, dst_access_fault; \
.previous
#define DST_S(x...) \
9999: x; \
.section __ex_table, "a"; \
.long 9999b, dst_access_fault_stack; \
.previous
# Copy len bytes from src to dst while accumulating a 32-bit
# ones-complement checksum in %eax.  Faulting source reads and
# destination writes are redirected to the handlers in the .fixup
# section at the bottom of this routine.
ENTRY(__csum_partial_copy_i386_generic)
# As we are a .S, we have to get all parameters from the stack ourselves :(
#
# stack layout after register saving (don't break it ..):
#
# char *src --------------> 0x14(%esp)
# char *dst --------------> 0x18(%esp)
# int len ----------------> 0x1c(%esp)
# int sum ----------------> 0x20(%esp)
# int *src_err_ptr -------> 0x24(%esp)
# int *dst_err_ptr -------> 0x28(%esp)
pushl %edi
pushl %esi
pushl %ebx
pushl %ecx
movl 0x14(%esp), %esi
movl 0x18(%esp), %edi
movl 0x1c(%esp), %ecx
movl 0x20(%esp), %eax
testl $2, %edi # Check alignment.
jz 2f # Jump if alignment is ok.
subl $2, %ecx # Alignment uses up two bytes.
jae 1f # Jump if we had at least two bytes.
addl $2, %ecx # ecx was < 2. Deal with it.
jmp 4f
SRC( 1: movw (%esi), %bx )
addl $2, %esi
DST( movw %bx, (%edi) )
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
2:
pushl %ecx
# from here on we have 32 bits on the stack.
shrl $5, %ecx
jz 2f
testl %esi, %esi # clears CF before the adcl chain (testl cannot fault)
SRC_S( 1: movl (%esi), %ebx )
SRC_S( movl 4(%esi), %edx )
adcl %ebx, %eax
DST_S( movl %ebx, (%edi) )
adcl %edx, %eax
DST_S( movl %edx, 4(%edi) )
SRC_S( movl 8(%esi), %ebx )
SRC_S( movl 12(%esi), %edx )
adcl %ebx, %eax
DST_S( movl %ebx, 8(%edi) )
adcl %edx, %eax
DST_S( movl %edx, 12(%edi) )
SRC_S( movl 16(%esi), %ebx )
SRC_S( movl 20(%esi), %edx )
adcl %ebx, %eax
DST_S( movl %ebx, 16(%edi) )
adcl %edx, %eax
DST_S( movl %edx, 20(%edi) )
SRC_S( movl 24(%esi), %ebx )
SRC_S( movl 28(%esi), %edx )
adcl %ebx, %eax
DST_S( movl %ebx, 24(%edi) )
adcl %edx, %eax
DST_S( movl %edx, 28(%edi) )
# lea cannot fault, it's just pointer preparation:
lea 32(%esi), %esi
lea 32(%edi), %edi
dec %ecx
jne 1b
adcl $0, %eax
2: popl %edx
# from here on we have a clean stack again
movl %edx, %ecx
andl $0x1c, %edx
je 4f
shrl $2, %edx # This clears CF
SRC( 3: movl (%esi), %ebx )
lea 4(%esi), %esi
adcl %ebx, %eax
DST( movl %ebx, (%edi) )
lea 4(%edi), %edi
dec %edx
jne 3b
adcl $0, %eax
4: andl $3, %ecx
jz 7f
cmpl $2, %ecx
jb 5f
# NOTE(review): leal cannot fault (see the bare lea in the 32-byte loop
# above), so the SRC()/DST() wrappers on the leal lines below look
# unnecessary -- confirm before removing.
SRC( movw (%esi), %cx )
SRC( leal 2(%esi), %esi )
DST( movw %cx, (%edi) )
DST( leal 2(%edi), %edi )
je 6f
shll $16,%ecx
SRC( 5: movb (%esi), %cl )
DST( movb %cl, (%edi) )
6: addl %ecx, %eax
adcl $0, %eax
7:
end_of_body:
popl %ecx
popl %ebx
popl %esi
popl %edi
ret
# Exception handlers:
################################################
.section .fixup, "a"
common_src_fixup:
# the _from_user() stuff has to zero out the kernel buffer, to prevent
# sending out random kernel data.
# This is inefficient but safe. We do not care too much about exception handler
# efficiency, they are totally dead code usually.
# We cannot use %ecx easily, as it could contain 'real length', or
# 'real length/32'. So we simply reload %ecx from our parameter block
# and zero the whole buffer. This means we set sum = input sum too.
# We assume that writing into %edi is safe. So if we are ever so crazy to
# do some kind of user_to_user checksum with copying, be prepared to fix
# this ...
movl 0x24(%esp), %ebx
movl $(-EFAULT), %eax
movl %eax, (%ebx)
movl 0x18(%esp), %edi
movl 0x1c(%esp), %ecx
xorl %eax,%eax
cld
rep
stosb
movl 0x20(%esp), %eax
jmp end_of_body
src_access_fault:
jmp common_src_fixup
src_access_fault_stack:
popl %ebx # <--- clean up the stack
jmp common_src_fixup
# Currently we do not copy and checksum towards user space,
# but the code is ready anyway ...
common_dst_fixup:
movl 0x28(%esp), %ebx
movl $(-EFAULT), (%ebx)
movl 0x20(%esp), %eax
jmp end_of_body
dst_access_fault:
jmp common_dst_fixup
dst_access_fault_stack:
popl %ebx # <--- clean up the stack
jmp common_dst_fixup
.previous
#ifndef _I386_CHECKSUM_H
#define _I386_CHECKSUM_H
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
......@@ -15,9 +13,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern
unsigned int csum_partial (const unsigned char * buff, int len, unsigned int sum);
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
/*
* the same as csum_partial, but copies from src while it
......@@ -27,7 +23,7 @@ unsigned int csum_partial (const unsigned char * buff, int len, unsigned int sum
* better 64-bit) boundary
*/
unsigned int __csum_partial_copy_i386_generic( const char *src, char *dst, int len, int sum,
unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
int *src_err_ptr, int *dst_err_ptr);
extern __inline__
......@@ -36,7 +32,7 @@ unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
{
int *src_err_ptr=NULL, *dst_err_ptr=NULL;
return __csum_partial_copy_i386_generic ( src, dst, len, sum, src_err_ptr, dst_err_ptr);
return csum_partial_copy_generic ( src, dst, len, sum, src_err_ptr, dst_err_ptr);
}
extern __inline__
......@@ -45,15 +41,7 @@ unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
{
int *dst_err_ptr=NULL;
/*
* If the source address is invalid, force an exception via NULL pointer.
* The point of this solution is to make the code smaller. The exception path
* doesnt have to be fast.
*/
if (!access_ok(VERIFY_READ, src, len))
src = NULL;
return __csum_partial_copy_i386_generic ( src, dst, len, sum, err_ptr, dst_err_ptr);
return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, dst_err_ptr);
}
/*
......@@ -66,10 +54,7 @@ unsigned int csum_partial_copy_to_user ( const char *src, char *dst,
{
int *src_err_ptr=NULL;
if (!access_ok(VERIFY_WRITE, dst, len))
dst = NULL;
return __csum_partial_copy_i386_generic ( src, dst, len, sum, src_err_ptr, err_ptr);
return csum_partial_copy_generic ( src, dst, len, sum, src_err_ptr, err_ptr);
}
/*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment