patch-2.3.99-pre4 linux/include/asm-sh/checksum.h
- Lines: 141
- Date: Mon Mar 27 10:26:15 2000
- Orig file: v2.3.99-pre3/linux/include/asm-sh/checksum.h
- Orig date: Tue Feb 1 01:35:44 2000
diff -u --recursive --new-file v2.3.99-pre3/linux/include/asm-sh/checksum.h linux/include/asm-sh/checksum.h
@@ -55,24 +55,6 @@
return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
}
-#if 0
-
-/* Not used at the moment. It is difficult to imagine for what purpose
- it can be used :-) Please, do not forget to verify_area before it --ANK
- */
-
-/*
- * This combination is currently not used, but possible:
- */
-
-extern __inline__
-unsigned int csum_partial_copy_to_user ( const char *src, char *dst,
- int len, int sum, int *err_ptr)
-{
- return csum_partial_copy_generic ( src, dst, len, sum, NULL, err_ptr);
-}
-#endif
-
/*
* These are the old (and unsafe) way of doing checksums, a warning message will be
* printed if they are used and an exception occurs.
@@ -91,12 +73,12 @@
{
unsigned int __dummy;
__asm__("clrt\n\t"
- "mov %0,%1\n\t"
+ "mov %0, %1\n\t"
"shll16 %0\n\t"
- "addc %0,%1\n\t"
+ "addc %0, %1\n\t"
"movt %0\n\t"
"shlr16 %1\n\t"
- "add %1,%0"
+ "add %1, %0"
: "=r" (sum), "=&r" (__dummy)
: "0" (sum));
return ~sum;
@@ -114,24 +96,24 @@
unsigned int sum, __dummy;
__asm__ __volatile__(
- "mov.l @%1+,%0\n\t"
- "add #-4,%2\n\t"
+ "mov.l @%1+, %0\n\t"
+ "add #-4, %2\n\t"
"clrt\n\t"
- "mov.l @%1+,%3\n\t"
- "addc %3,%0\n\t"
- "mov.l @%1+,%3\n\t"
- "addc %3,%0\n\t"
- "mov.l @%1+,%3\n\t"
- "addc %3,%0\n"
+ "mov.l @%1+, %3\n\t"
+ "addc %3, %0\n\t"
+ "mov.l @%1+, %3\n\t"
+ "addc %3, %0\n\t"
+ "mov.l @%1+, %3\n\t"
+ "addc %3, %0\n"
"1:\t"
- "mov.l @%1+,%3\n\t"
- "addc %3,%0\n\t"
+ "mov.l @%1+, %3\n\t"
+ "addc %3, %0\n\t"
"movt %3\n\t"
"dt %2\n\t"
"bf/s 1b\n\t"
- " cmp/eq #1,%3\n\t"
- "mov #0,%3\n\t"
- "addc %3,%0\n\t"
+ " cmp/eq #1, %3\n\t"
+ "mov #0, %3\n\t"
+ "addc %3, %0\n\t"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
@@ -153,11 +135,11 @@
unsigned long len_proto = (proto<<16)+len;
#endif
__asm__("clrt\n\t"
- "addc %0,%1\n\t"
- "addc %2,%1\n\t"
- "addc %3,%1\n\t"
+ "addc %0, %1\n\t"
+ "addc %2, %1\n\t"
+ "addc %3, %1\n\t"
"movt %0\n\t"
- "add %1,%0"
+ "add %1, %0"
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
return sum;
@@ -195,26 +177,26 @@
{
unsigned int __dummy;
__asm__("clrt\n\t"
- "mov.l @(0,%2),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(4,%2),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(8,%2),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(12,%2),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(0,%3),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(4,%3),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(8,%3),%1\n\t"
- "addc %1,%0\n\t"
- "mov.l @(12,%3),%1\n\t"
- "addc %1,%0\n\t"
- "addc %4,%0\n\t"
- "addc %5,%0\n\t"
+ "mov.l @(0,%2), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(4,%2), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(8,%2), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(12,%2), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(0,%3), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(4,%3), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(8,%3), %1\n\t"
+ "addc %1, %0\n\t"
+ "mov.l @(12,%3), %1\n\t"
+ "addc %1, %0\n\t"
+ "addc %4, %0\n\t"
+ "addc %5, %0\n\t"
"movt %1\n\t"
- "add %1,%0\n"
+ "add %1, %0\n"
: "=r" (sum), "=&r" (__dummy)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum));