Commit 8585093e authored by David S. Miller

[NET]: Apply missed parts of csum_partial_copy killing patch.

parent 9d99fd6a
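
Editorial note: this patch completes an interface split. The old csum_partial_copy() assumed trusted kernel pointers and, in several ports, merely printk'd on faults; callers must now choose between the explicit _nocheck and _from_user variants visible in the hunks below. A minimal caller-side sketch of the resulting convention, assuming only the declarations that appear in this diff (the copy_and_csum() wrapper and its from_user flag are hypothetical, for illustration only):

	/* Hypothetical caller, not part of this commit. */
	static unsigned int copy_and_csum(const char *src, char *dst, int len,
					  unsigned int sum, int from_user)
	{
		int err = 0;

		if (from_user)
			/* untrusted source pointer: faults reported in err */
			return csum_partial_copy_from_user(src, dst, len,
							   sum, &err);

		/* both buffers are kernel memory: no access checking */
		return csum_partial_copy_nocheck(src, dst, len, sum);
	}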
@@ -167,7 +167,6 @@ EXPORT_SYMBOL(sys_wait4);
 EXPORT_SYMBOL(csum_tcpudp_magic);
 EXPORT_SYMBOL(ip_compute_csum);
 EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_partial_copy);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(csum_partial_copy_from_user);
 EXPORT_SYMBOL(csum_ipv6_magic);
@@ -385,16 +385,3 @@ csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
 {
 	return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
 }
-
-unsigned int
-csum_partial_copy (const char *src, char *dst, int len, unsigned int sum)
-{
-	unsigned int ret;
-	int error = 0;
-
-	ret = do_csum_partial_copy_from_user(src, dst, len, sum, &error);
-	if (error)
-		printk("csum_partial_copy_old(): tell mingo to convert me!\n");
-
-	return ret;
-}
@@ -80,48 +80,3 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 	BITOFF;
 	return(sum);
 }
-
-#if 0
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-
-unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
-			       int len, unsigned int sum)
-{
-	const unsigned char *endMarker;
-	const unsigned char *marker;
-
-	printk("csum_partial_copy len %d.\n", len);
-#if 0
-	if((int)src & 0x3)
-		printk("unaligned src %p\n", src);
-	if((int)dst & 0x3)
-		printk("unaligned dst %p\n", dst);
-	__delay(1800);	/* extra delay of 90 us to test performance hit */
-#endif
-	endMarker = src + len;
-	marker = endMarker - (len % 16);
-	CBITON;
-	while(src < marker) {
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-	}
-	marker = endMarker - (len % 2);
-	while(src < marker) {
-		sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++);
-	}
-	if(endMarker - src > 0) {
-		sum += (*dst = *src);	/* add extra byte seperately */
-	}
-	CBITOFF;
-	return(sum);
-}
-#endif
@@ -146,16 +146,3 @@ csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
 {
 	return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
 }
-
-unsigned int
-csum_partial_copy (const char *src, char *dst, int len, unsigned int sum)
-{
-	unsigned int ret;
-	int error = 0;
-
-	ret = do_csum_partial_copy_from_user(src, dst, len, sum, &error);
-	if (error)
-		printk("csum_partial_copy_old(): tell mingo to convert me!\n");
-
-	return ret;
-}
@@ -61,9 +61,6 @@ EXPORT_SYMBOL(kernel_thread);
 EXPORT_SYMBOL(vme_brdtype);
 #endif
 
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
-
 /* The following are special because they're not called
    explicitly (the C compiler generates them).  Fortunately,
    their interface isn't gonna change any time soon now, so
@@ -318,103 +318,3 @@ csum_partial_copy_from_user(const char *src, char *dst, int len,
 	return(sum);
 }
-
-/*
- * copy from kernel space while checksumming, otherwise like csum_partial
- */
-unsigned int
-csum_partial_copy(const char *src, char *dst, int len, int sum)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__("movel %2,%4\n\t"
-		"btst #1,%4\n\t"	/* Check alignment */
-		"jeq 2f\n\t"
-		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
-		"jgt 1f\n\t"
-		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
-		"jra 4f\n"
-		"1:\t"
-		"movew %2@+,%4\n\t"	/* add first word to sum */
-		"addw %4,%0\n\t"
-		"movew %4,%3@+\n\t"
-		"clrl %4\n\t"
-		"addxl %4,%0\n"		/* add X bit */
-		"2:\t"
-		/* unrolled loop for the main part: do 8 longs at once */
-		"movel %1,%4\n\t"	/* save len in tmp1 */
-		"lsrl #5,%1\n\t"	/* len/32 */
-		"jeq 2f\n\t"		/* not enough... */
-		"subql #1,%1\n"
-		"1:\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"dbra %1,1b\n\t"
-		"clrl %5\n\t"
-		"addxl %5,%0\n\t"	/* add X bit */
-		"clrw %1\n\t"
-		"subql #1,%1\n\t"
-		"jcc 1b\n"
-		"2:\t"
-		"movel %4,%1\n\t"	/* restore len from tmp1 */
-		"andw #0x1c,%4\n\t"	/* number of rest longs */
-		"jeq 4f\n\t"
-		"lsrw #2,%4\n\t"
-		"subqw #1,%4\n"
-		"3:\t"
-		/* loop for rest longs */
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"dbra %4,3b\n\t"
-		"clrl %5\n\t"
-		"addxl %5,%0\n"		/* add X bit */
-		"4:\t"
-		/* now check for rest bytes that do not fit into longs */
-		"andw #3,%1\n\t"
-		"jeq 7f\n\t"
-		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
-		"subqw #2,%1\n\t"
-		"jlt 5f\n\t"
-		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
-		"movew %5,%3@+\n\t"
-		"swap %5\n\t"		/* into bits 16..31 */
-		"tstw %1\n\t"		/* another byte? */
-		"jeq 6f\n"
-		"5:\t"
-		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
-		"moveb %5,%3@+\n\t"
-		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
-		"6:\t"
-		"addl %5,%0\n\t"	/* now add rest long to sum */
-		"clrl %5\n\t"
-		"addxl %5,%0\n"		/* add X bit */
-		"7:\t"
-		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
-		  "=&d" (tmp1), "=&d" (tmp2)
-		: "0" (sum), "1" (len), "2" (src), "3" (dst)
-		);
-	return(sum);
-}
@@ -79,9 +79,6 @@ EXPORT_SYMBOL_NOVERS(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL_NOVERS(__strnlen_user_asm);
 
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
-
 /*
  * Functions to control caches.
  */
@@ -25,8 +25,8 @@
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy(const char *src, char *dst,
-			       int len, unsigned int sum)
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
+				       int len, unsigned int sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -75,9 +75,6 @@ EXPORT_SYMBOL_NOVERS(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL_NOVERS(__strnlen_user_asm);
 
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
-
 /*
  * Functions to control caches.
 */
@@ -16,8 +16,8 @@
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy(const char *src, char *dst,
-			       int len, unsigned int sum)
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
+				       int len, unsigned int sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -97,8 +97,8 @@ unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy(const char *src, char *dst,
-			       int len, unsigned int sum)
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
+				       int len, unsigned int sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -36,9 +36,6 @@ EXPORT_SYMBOL(disable_irq_nosync);
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(no_irq_type);
 
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
-
 EXPORT_SYMBOL(strpbrk);
 EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(strlen);
@@ -42,14 +42,6 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
  *
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
- *
- * this will go away soon.
  */
-unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int sum);
-
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
 unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
@@ -49,14 +49,6 @@ unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
 	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
 }
-
-/*
- * This is the old (and unsafe) way of doing checksums, a warning message will
- * be printed if it is used and an exeption occurs.
- *
- * this function should go away after some time.
- */
-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
@@ -48,15 +48,6 @@ extern unsigned int csum_partial (const unsigned char * buff, int len,
  *
  * Here it is even more important to align src and dst on a 32-bit (or
  * even better 64-bit) boundary.
- *
- * this will go away soon.
  */
-extern unsigned int csum_partial_copy (const char *src, char *dst, int len,
-				       unsigned int sum);
-
-/*
- * This is a new version of the above that records errors it finds in
- * *errp, but continues and zeros the rest of the buffer.
- */
 extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
						  int len, unsigned int sum,
@@ -21,18 +21,6 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  *
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
- *
- * this will go away soon.
  */
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
 extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
@@ -24,12 +24,6 @@
  */
 unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
 
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-#define csum_partial_copy_nocheck csum_partial_copy
-
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
@@ -58,11 +52,9 @@ extern inline unsigned int csum_and_copy_to_user (const char *src, char *dst,
 /*
  * the same as csum_partial, but copies from user space (but on MIPS
  * we have just one address space, so this is identical to the above)
- *
- * this is obsolete and will go away.
  */
-unsigned int csum_partial_copy(const char *src, char *dst, int len,
-			       unsigned int sum);
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
+				       unsigned int sum);
 
 /*
  * Fold a partial checksum without adding pseudo headers
@@ -26,12 +26,6 @@
  */
 unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
 
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-#define csum_partial_copy_nocheck csum_partial_copy
-
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
@@ -60,11 +54,9 @@ extern inline unsigned int csum_and_copy_to_user (const char *src, char *dst,
 /*
  * the same as csum_partial, but copies from user space (but on MIPS
  * we have just one address space, so this is identical to the above)
- *
- * this is obsolete and will go away.
  */
-unsigned int csum_partial_copy(const char *src, char *dst, int len,
-			       unsigned int sum);
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
+				       unsigned int sum);
 
 /*
  * Fold a partial checksum without adding pseudo headers
@@ -21,10 +21,8 @@ extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
  *
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
- *
- * this will go away soon.
  */
-extern unsigned int csum_partial_copy(const char *, char *, int, unsigned int);
+extern unsigned int csum_partial_copy_nocheck(const char *, char *, int, unsigned int);
 
 /*
  * this is a new version of the above that records errors it finds in *errp,
@@ -32,20 +30,6 @@
  */
 unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
-
-/*
- * Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the
- * verify_area().
- */
-extern __inline__
-unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
-					int len, int sum)
-{
-	return csum_partial_copy (src, dst, len, sum);
-}
 
 /*
  * Optimized for IP headers, which always checksum on 4 octet boundaries.
 *
@@ -38,13 +38,6 @@ extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
 /* FIXME: this needs to be written to really do no check -- Cort */
 #define csum_partial_copy_nocheck(src, dst, len, sum) \
	csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
-
-/*
- * Old version which ignore errors.
- * it will go away soon.
- */
-#define csum_partial_copy(src, dst, len, sum) \
-	csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
 
 /*
  * turns a 32-bit partial checksum (e.g. from csum_partial) into a
@@ -43,12 +43,7 @@ extern unsigned int csum_partial(const unsigned char * buff, int len,
 /*
  * the same as csum_partial, but copies from src to dst while it
  * checksums
- *
- * csum_partial_copy will go away soon.
  */
-unsigned int csum_partial_copy(const char *src, char *dst,
-			       int len, unsigned int sum);
-
 extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
					      int len, unsigned int sum,
					      int *src_err, int *dst_err);
@@ -61,23 +61,6 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
 	return sum;
 }
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- *
- * this will go away soon.
- */
-static inline unsigned int
-csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
-{
-	memcpy(dst,src,len);
-	return csum_partial_inline(dst, len, sum);
-}
-
 /*
  * the same as csum_partial_copy, but copies from user space.
  *
@@ -63,23 +63,6 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
 	return sum;
 }
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- *
- * this will go away soon.
- */
-static inline unsigned int
-csum_partial_copy(const char *src, char *dst, int len,unsigned int sum)
-{
-	memcpy(dst,src,len);
-	return csum_partial_inline(dst, len, sum);
-}
-
 /*
  * the same as csum_partial_copy, but copies from user space.
 *
@@ -57,14 +57,6 @@ unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
 	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
 }
-
-/*
- * This is the old (and unsafe) way of doing checksums, a warning message will
- * be printed if it is used and an exeption occurs.
- *
- * this function should go away after some time.
- */
-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
 
 /*
  * Fold a partial checksum
 */
@@ -40,10 +40,6 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
  * better 64-bit) boundary
  */
 
-/* FIXME: Remove this macro ASAP */
-#define csum_partial_copy(src, dst, len, sum) \
-		csum_partial_copy_nocheck(src,dst,len,sum)
-
 extern unsigned int __csum_partial_copy_sparc_generic (const char *, char *);
 
 extern __inline__ unsigned int
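
Editorial note: every per-architecture csum_partial_copy() body deleted above is semantically just "copy, then checksum the copy", as the inline removed from the s390 header makes explicit. A portable sketch of that equivalence, illustrative only and assuming nothing beyond the csum_partial() declaration that appears throughout this diff; real ports fuse the copy and the checksum into a single pass for speed, as in the m68k assembly above:

	static inline unsigned int
	csum_partial_copy_nocheck(const char *src, char *dst, int len,
				  unsigned int sum)
	{
		/* copy first, then checksum the destination buffer */
		memcpy(dst, src, len);
		return csum_partial((const unsigned char *)dst, len, sum);
	}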