Diffstat (limited to 'arch/m68k/lib')
-rw-r--r--   arch/m68k/lib/Makefile      |  11
-rw-r--r--   arch/m68k/lib/Makefile_mm   |   6
-rw-r--r--   arch/m68k/lib/Makefile_no   |   7
-rw-r--r--   arch/m68k/lib/checksum.c    | 430
-rw-r--r--   arch/m68k/lib/checksum_mm.c | 425
-rw-r--r--   arch/m68k/lib/checksum_no.c | 153
-rw-r--r--   arch/m68k/lib/delay.c       |  21
-rw-r--r--   arch/m68k/lib/divsi3.S      | 125
-rw-r--r--   arch/m68k/lib/memcpy.c      |  62
-rw-r--r--   arch/m68k/lib/memmove.c     | 105
-rw-r--r--   arch/m68k/lib/memset.c      |  47
-rw-r--r--   arch/m68k/lib/modsi3.S      | 113
-rw-r--r--   arch/m68k/lib/muldi3.c      |  68
-rw-r--r--   arch/m68k/lib/muldi3_mm.c   |  63
-rw-r--r--   arch/m68k/lib/muldi3_no.c   |  86
-rw-r--r--   arch/m68k/lib/mulsi3.S      | 110
-rw-r--r--   arch/m68k/lib/udivsi3.S     | 162
-rw-r--r--   arch/m68k/lib/umodsi3.S     | 113
18 files changed, 1613 insertions(+), 494 deletions(-)
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index af9abf8d9d9..1f95881d843 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -1,6 +1,5 @@
-#
-# Makefile for m68k-specific library files..
-#
-
-lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	   checksum.o string.o uaccess.o
+ifdef CONFIG_MMU
+include arch/m68k/lib/Makefile_mm
+else
+include arch/m68k/lib/Makefile_no
+endif
diff --git a/arch/m68k/lib/Makefile_mm b/arch/m68k/lib/Makefile_mm
new file mode 100644
index 00000000000..af9abf8d9d9
--- /dev/null
+++ b/arch/m68k/lib/Makefile_mm
@@ -0,0 +1,6 @@
+#
+# Makefile for m68k-specific library files..
+#
+
+lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+	   checksum.o string.o uaccess.o
diff --git a/arch/m68k/lib/Makefile_no b/arch/m68k/lib/Makefile_no
new file mode 100644
index 00000000000..32d852e586d
--- /dev/null
+++ b/arch/m68k/lib/Makefile_no
@@ -0,0 +1,7 @@
+#
+# Makefile for m68knommu specific library files..
+#
+
+lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o \
+	   muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
+	   checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 6216f12a756..1297536060d 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -1,425 +1,5 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		IP/TCP/UDP checksumming routines
- *
- * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
- *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *		Tom May, <ftom@netcom.com>
- *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- *		Lots of code moved from tcp.c and ip.c; see those files
- *		for more names.
- *
- * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
- *		Fixed some nasty bugs, causing some horrible crashes.
- *		A: At some points, the sum (%0) was used as
- *		length-counter instead of the length counter
- *		(%1). Thanks to Roman Hodek for pointing this out.
- *		B: GCC seems to mess up if one uses too many
- *		data-registers to hold input values and one tries to
- *		specify d0 and d1 as scratch registers. Letting gcc
- *		choose these registers itself solves the problem.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * 1998/8/31	Andreas Schwab:
- *		Zero out rest of buffer on exception in
- *		csum_partial_copy_from_user.
- */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-	unsigned long tmp1, tmp2;
-	/*
-	 * Experiments with ethernet and slip connections show that buff
-	 * is aligned on either a 2-byte or 4-byte boundary.
-	 */
-	__asm__("movel %2,%3\n\t"
-		"btst #1,%3\n\t"	/* Check alignment */
-		"jeq 2f\n\t"
-		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
-		"jgt 1f\n\t"
-		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
-		"jra 4f\n"
-	     "1:\t"
-		"addw %2@+,%0\n\t"	/* add first word to sum */
-		"clrl %3\n\t"
-		"addxl %3,%0\n"		/* add X bit */
-	     "2:\t"
-		/* unrolled loop for the main part: do 8 longs at once */
-		"movel %1,%3\n\t"	/* save len in tmp1 */
-		"lsrl #5,%1\n\t"	/* len/32 */
-		"jeq 2f\n\t"		/* not enough... */
-		"subql #1,%1\n"
-	     "1:\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"dbra %1,1b\n\t"
-		"clrl %4\n\t"
-		"addxl %4,%0\n\t"	/* add X bit */
-		"clrw %1\n\t"
-		"subql #1,%1\n\t"
-		"jcc 1b\n"
-	     "2:\t"
-		"movel %3,%1\n\t"	/* restore len from tmp1 */
-		"andw #0x1c,%3\n\t"	/* number of rest longs */
-		"jeq 4f\n\t"
-		"lsrw #2,%3\n\t"
-		"subqw #1,%3\n"
-	     "3:\t"
-		/* loop for rest longs */
-		"movel %2@+,%4\n\t"
-		"addxl %4,%0\n\t"
-		"dbra %3,3b\n\t"
-		"clrl %4\n\t"
-		"addxl %4,%0\n"		/* add X bit */
-	     "4:\t"
-		/* now check for rest bytes that do not fit into longs */
-		"andw #3,%1\n\t"
-		"jeq 7f\n\t"
-		"clrl %4\n\t"		/* clear tmp2 for rest bytes */
-		"subqw #2,%1\n\t"
-		"jlt 5f\n\t"
-		"movew %2@+,%4\n\t"	/* have rest >= 2: get word */
-		"swap %4\n\t"		/* into bits 16..31 */
-		"tstw %1\n\t"		/* another byte? */
-		"jeq 6f\n"
-	     "5:\t"
-		"moveb %2@,%4\n\t"	/* have odd rest: get byte */
-		"lslw #8,%4\n\t"	/* into bits 8..15; 16..31 untouched */
-	     "6:\t"
-		"addl %4,%0\n\t"	/* now add rest long to sum */
-		"clrl %4\n\t"
-		"addxl %4,%0\n"		/* add X bit */
-	     "7:\t"
-		: "=d" (sum), "=d" (len), "=a" (buff),
-		  "=&d" (tmp1), "=&d" (tmp2)
-		: "0" (sum), "1" (len), "2" (buff)
-	    );
-	return(sum);
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-
-/*
- * copy from user space while checksumming, with exception handling.
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-			    int len, __wsum sum, int *csum_err)
-{
-	/*
-	 * GCC doesn't like more than 10 operands for the asm
-	 * statements so we have to use tmp2 for the error
-	 * code.
-	 */
-	unsigned long tmp1, tmp2;
-
-	__asm__("movel %2,%4\n\t"
-		"btst #1,%4\n\t"	/* Check alignment */
-		"jeq 2f\n\t"
-		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
-		"jgt 1f\n\t"
-		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
-		"jra 4f\n"
-	     "1:\n"
-	    "10:\t"
-		"movesw %2@+,%4\n\t"	/* add first word to sum */
-		"addw %4,%0\n\t"
-		"movew %4,%3@+\n\t"
-		"clrl %4\n\t"
-		"addxl %4,%0\n"		/* add X bit */
-	     "2:\t"
-		/* unrolled loop for the main part: do 8 longs at once */
-		"movel %1,%4\n\t"	/* save len in tmp1 */
-		"lsrl #5,%1\n\t"	/* len/32 */
-		"jeq 2f\n\t"		/* not enough... */
-		"subql #1,%1\n"
-	     "1:\n"
-	    "11:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "12:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "13:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "14:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "15:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "16:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "17:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-	    "18:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"dbra %1,1b\n\t"
-		"clrl %5\n\t"
-		"addxl %5,%0\n\t"	/* add X bit */
-		"clrw %1\n\t"
-		"subql #1,%1\n\t"
-		"jcc 1b\n"
-	     "2:\t"
-		"movel %4,%1\n\t"	/* restore len from tmp1 */
-		"andw #0x1c,%4\n\t"	/* number of rest longs */
-		"jeq 4f\n\t"
-		"lsrw #2,%4\n\t"
-		"subqw #1,%4\n"
-	     "3:\n"
-		/* loop for rest longs */
-	    "19:\t"
-		"movesl %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"dbra %4,3b\n\t"
-		"clrl %5\n\t"
-		"addxl %5,%0\n"		/* add X bit */
-	     "4:\t"
-		/* now check for rest bytes that do not fit into longs */
-		"andw #3,%1\n\t"
-		"jeq 7f\n\t"
-		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
-		"subqw #2,%1\n\t"
-		"jlt 5f\n\t"
-	    "20:\t"
-		"movesw %2@+,%5\n\t"	/* have rest >= 2: get word */
-		"movew %5,%3@+\n\t"
-		"swap %5\n\t"		/* into bits 16..31 */
-		"tstw %1\n\t"		/* another byte? */
-		"jeq 6f\n"
-	     "5:\n"
-	    "21:\t"
-		"movesb %2@,%5\n\t"	/* have odd rest: get byte */
-		"moveb %5,%3@+\n\t"
-		"lslw #8,%5\n\t"	/* into bits 8..15; 16..31 untouched */
-	     "6:\t"
-		"addl %5,%0\n\t"	/* now add rest long to sum */
-		"clrl %5\n\t"
-		"addxl %5,%0\n\t"	/* add X bit */
-	     "7:\t"
-		"clrl %5\n"		/* no error - clear return value */
-	     "8:\n"
-		".section .fixup,\"ax\"\n"
-		".even\n"
-		/* If any exception occurs zero out the rest.
-		   Similarities with the code above are intentional :-) */
-	    "90:\t"
-		"clrw %3@+\n\t"
-		"movel %1,%4\n\t"
-		"lsrl #5,%1\n\t"
-		"jeq 1f\n\t"
-		"subql #1,%1\n"
-	    "91:\t"
-		"clrl %3@+\n"
-	    "92:\t"
-		"clrl %3@+\n"
-	    "93:\t"
-		"clrl %3@+\n"
-	    "94:\t"
-		"clrl %3@+\n"
-	    "95:\t"
-		"clrl %3@+\n"
-	    "96:\t"
-		"clrl %3@+\n"
-	    "97:\t"
-		"clrl %3@+\n"
-	    "98:\t"
-		"clrl %3@+\n\t"
-		"dbra %1,91b\n\t"
-		"clrw %1\n\t"
-		"subql #1,%1\n\t"
-		"jcc 91b\n"
-	     "1:\t"
-		"movel %4,%1\n\t"
-		"andw #0x1c,%4\n\t"
-		"jeq 1f\n\t"
-		"lsrw #2,%4\n\t"
-		"subqw #1,%4\n"
-	    "99:\t"
-		"clrl %3@+\n\t"
-		"dbra %4,99b\n\t"
-	     "1:\t"
-		"andw #3,%1\n\t"
-		"jeq 9f\n"
-	   "100:\t"
-		"clrw %3@+\n\t"
-		"tstw %1\n\t"
-		"jeq 9f\n"
-	   "101:\t"
-		"clrb %3@+\n"
-	     "9:\t"
-#define STR(X) STR1(X)
-#define STR1(X) #X
-		"moveq #-" STR(EFAULT) ",%5\n\t"
-		"jra 8b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		".long 10b,90b\n"
-		".long 11b,91b\n"
-		".long 12b,92b\n"
-		".long 13b,93b\n"
-		".long 14b,94b\n"
-		".long 15b,95b\n"
-		".long 16b,96b\n"
-		".long 17b,97b\n"
-		".long 18b,98b\n"
-		".long 19b,99b\n"
-		".long 20b,100b\n"
-		".long 21b,101b\n"
-		".previous"
-		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
-		  "=&d" (tmp1), "=d" (tmp2)
-		: "0" (sum), "1" (len), "2" (src), "3" (dst)
-	    );
-
-	*csum_err = tmp2;
-
-	return(sum);
-}
-
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-
-/*
- * copy from kernel space while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
-	unsigned long tmp1, tmp2;
-	__asm__("movel %2,%4\n\t"
-		"btst #1,%4\n\t"	/* Check alignment */
-		"jeq 2f\n\t"
-		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
-		"jgt 1f\n\t"
-		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
-		"jra 4f\n"
-	     "1:\t"
-		"movew %2@+,%4\n\t"	/* add first word to sum */
-		"addw %4,%0\n\t"
-		"movew %4,%3@+\n\t"
-		"clrl %4\n\t"
-		"addxl %4,%0\n"		/* add X bit */
-	     "2:\t"
-		/* unrolled loop for the main part: do 8 longs at once */
-		"movel %1,%4\n\t"	/* save len in tmp1 */
-		"lsrl #5,%1\n\t"	/* len/32 */
-		"jeq 2f\n\t"		/* not enough... */
-		"subql #1,%1\n"
-	     "1:\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"dbra %1,1b\n\t"
-		"clrl %5\n\t"
-		"addxl %5,%0\n\t"	/* add X bit */
-		"clrw %1\n\t"
-		"subql #1,%1\n\t"
-		"jcc 1b\n"
-	     "2:\t"
-		"movel %4,%1\n\t"	/* restore len from tmp1 */
-		"andw #0x1c,%4\n\t"	/* number of rest longs */
-		"jeq 4f\n\t"
-		"lsrw #2,%4\n\t"
-		"subqw #1,%4\n"
-	     "3:\t"
-		/* loop for rest longs */
-		"movel %2@+,%5\n\t"
-		"addxl %5,%0\n\t"
-		"movel %5,%3@+\n\t"
-		"dbra %4,3b\n\t"
-		"clrl %5\n\t"
-		"addxl %5,%0\n"		/* add X bit */
-	     "4:\t"
-		/* now check for rest bytes that do not fit into longs */
-		"andw #3,%1\n\t"
-		"jeq 7f\n\t"
-		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
-		"subqw #2,%1\n\t"
-		"jlt 5f\n\t"
-		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
-		"movew %5,%3@+\n\t"
-		"swap %5\n\t"		/* into bits 16..31 */
-		"tstw %1\n\t"		/* another byte? */
-		"jeq 6f\n"
-	     "5:\t"
-		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
-		"moveb %5,%3@+\n\t"
-		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
-	     "6:\t"
-		"addl %5,%0\n\t"	/* now add rest long to sum */
-		"clrl %5\n\t"
-		"addxl %5,%0\n"		/* add X bit */
-	     "7:\t"
-		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
-		  "=&d" (tmp1), "=&d" (tmp2)
-		: "0" (sum), "1" (len), "2" (src), "3" (dst)
-	    );
-	return(sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
+#ifdef CONFIG_MMU
+#include "checksum_mm.c"
+#else
+#include "checksum_no.c"
+#endif
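Note: the five lines added above reduce checksum.c to a build-time dispatcher; the 425 removed lines reappear unchanged below as checksum_mm.c. The core of that implementation is the unrolled loop of movel/addxl pairs, which performs one's-complement accumulation: addxl adds its operand plus the X (extend) bit, so the carry out of each 32-bit add is folded back into the sum. A rough C model of that inner loop, written for exposition only (the function name and standalone framing are assumptions, not part of this patch):

/*
 * Illustrative C model of the unrolled "addxl" loop above -- exposition
 * only, not part of this patch.  Assumes a 32-bit unsigned long, as on
 * m68k, so the add wraps at bit 31 exactly like the hardware register.
 */
static unsigned long csum_add_words_model(const unsigned long *p,
					  int nwords, unsigned long sum)
{
	while (nwords--) {
		unsigned long w = *p++;

		sum += w;		/* 32-bit add... */
		if (sum < w)		/* ...carried out of bit 31? */
			sum++;		/* end-around carry, like addxl */
	}
	return sum;
}

The generated code defers the fold, letting the X bit ride from one addxl to the next and folding it once per 32-byte block with clrl/addxl; folding each carry immediately, as in the model, yields the same one's-complement result.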
diff --git a/arch/m68k/lib/checksum_mm.c b/arch/m68k/lib/checksum_mm.c
new file mode 100644
index 00000000000..6216f12a756
--- /dev/null
+++ b/arch/m68k/lib/checksum_mm.c
@@ -0,0 +1,425 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		IP/TCP/UDP checksumming routines
+ *
+ * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
+ *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *		Tom May, <ftom@netcom.com>
+ *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *		Fixed some nasty bugs, causing some horrible crashes.
+ *		A: At some points, the sum (%0) was used as
+ *		length-counter instead of the length counter
+ *		(%1). Thanks to Roman Hodek for pointing this out.
+ *		B: GCC seems to mess up if one uses too many
+ *		data-registers to hold input values and one tries to
+ *		specify d0 and d1 as scratch registers. Letting gcc
+ *		choose these registers itself solves the problem.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 1998/8/31	Andreas Schwab:
+ *		Zero out rest of buffer on exception in
+ *		csum_partial_copy_from_user.
+ */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	unsigned long tmp1, tmp2;
+	/*
+	 * Experiments with ethernet and slip connections show that buff
+	 * is aligned on either a 2-byte or 4-byte boundary.
+	 */
+	__asm__("movel %2,%3\n\t"
+		"btst #1,%3\n\t"	/* Check alignment */
+		"jeq 2f\n\t"
+		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
+		"jgt 1f\n\t"
+		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
+		"jra 4f\n"
+	     "1:\t"
+		"addw %2@+,%0\n\t"	/* add first word to sum */
+		"clrl %3\n\t"
+		"addxl %3,%0\n"		/* add X bit */
+	     "2:\t"
+		/* unrolled loop for the main part: do 8 longs at once */
+		"movel %1,%3\n\t"	/* save len in tmp1 */
+		"lsrl #5,%1\n\t"	/* len/32 */
+		"jeq 2f\n\t"		/* not enough... */
+		"subql #1,%1\n"
+	     "1:\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"dbra %1,1b\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n\t"	/* add X bit */
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 1b\n"
+	     "2:\t"
+		"movel %3,%1\n\t"	/* restore len from tmp1 */
+		"andw #0x1c,%3\n\t"	/* number of rest longs */
+		"jeq 4f\n\t"
+		"lsrw #2,%3\n\t"
+		"subqw #1,%3\n"
+	     "3:\t"
+		/* loop for rest longs */
+		"movel %2@+,%4\n\t"
+		"addxl %4,%0\n\t"
+		"dbra %3,3b\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "4:\t"
+		/* now check for rest bytes that do not fit into longs */
+		"andw #3,%1\n\t"
+		"jeq 7f\n\t"
+		"clrl %4\n\t"		/* clear tmp2 for rest bytes */
+		"subqw #2,%1\n\t"
+		"jlt 5f\n\t"
+		"movew %2@+,%4\n\t"	/* have rest >= 2: get word */
+		"swap %4\n\t"		/* into bits 16..31 */
+		"tstw %1\n\t"		/* another byte? */
+		"jeq 6f\n"
+	     "5:\t"
+		"moveb %2@,%4\n\t"	/* have odd rest: get byte */
+		"lslw #8,%4\n\t"	/* into bits 8..15; 16..31 untouched */
+	     "6:\t"
+		"addl %4,%0\n\t"	/* now add rest long to sum */
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "7:\t"
+		: "=d" (sum), "=d" (len), "=a" (buff),
+		  "=&d" (tmp1), "=&d" (tmp2)
+		: "0" (sum), "1" (len), "2" (buff)
+	    );
+	return(sum);
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+
+/*
+ * copy from user space while checksumming, with exception handling.
+ */
+
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum sum, int *csum_err)
+{
+	/*
+	 * GCC doesn't like more than 10 operands for the asm
+	 * statements so we have to use tmp2 for the error
+	 * code.
+	 */
+	unsigned long tmp1, tmp2;
+
+	__asm__("movel %2,%4\n\t"
+		"btst #1,%4\n\t"	/* Check alignment */
+		"jeq 2f\n\t"
+		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
+		"jgt 1f\n\t"
+		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
+		"jra 4f\n"
+	     "1:\n"
+	    "10:\t"
+		"movesw %2@+,%4\n\t"	/* add first word to sum */
+		"addw %4,%0\n\t"
+		"movew %4,%3@+\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "2:\t"
+		/* unrolled loop for the main part: do 8 longs at once */
+		"movel %1,%4\n\t"	/* save len in tmp1 */
+		"lsrl #5,%1\n\t"	/* len/32 */
+		"jeq 2f\n\t"		/* not enough... */
+		"subql #1,%1\n"
+	     "1:\n"
+	    "11:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "12:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "13:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "14:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "15:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "16:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "17:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+	    "18:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %1,1b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n\t"	/* add X bit */
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 1b\n"
+	     "2:\t"
+		"movel %4,%1\n\t"	/* restore len from tmp1 */
+		"andw #0x1c,%4\n\t"	/* number of rest longs */
+		"jeq 4f\n\t"
+		"lsrw #2,%4\n\t"
+		"subqw #1,%4\n"
+	     "3:\n"
+		/* loop for rest longs */
+	    "19:\t"
+		"movesl %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %4,3b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n"		/* add X bit */
+	     "4:\t"
+		/* now check for rest bytes that do not fit into longs */
+		"andw #3,%1\n\t"
+		"jeq 7f\n\t"
+		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
+		"subqw #2,%1\n\t"
+		"jlt 5f\n\t"
+	    "20:\t"
+		"movesw %2@+,%5\n\t"	/* have rest >= 2: get word */
+		"movew %5,%3@+\n\t"
+		"swap %5\n\t"		/* into bits 16..31 */
+		"tstw %1\n\t"		/* another byte? */
+		"jeq 6f\n"
+	     "5:\n"
+	    "21:\t"
+		"movesb %2@,%5\n\t"	/* have odd rest: get byte */
+		"moveb %5,%3@+\n\t"
+		"lslw #8,%5\n\t"	/* into bits 8..15; 16..31 untouched */
+	     "6:\t"
+		"addl %5,%0\n\t"	/* now add rest long to sum */
+		"clrl %5\n\t"
+		"addxl %5,%0\n\t"	/* add X bit */
+	     "7:\t"
+		"clrl %5\n"		/* no error - clear return value */
+	     "8:\n"
+		".section .fixup,\"ax\"\n"
+		".even\n"
+		/* If any exception occurs zero out the rest.
+		   Similarities with the code above are intentional :-) */
+	    "90:\t"
+		"clrw %3@+\n\t"
+		"movel %1,%4\n\t"
+		"lsrl #5,%1\n\t"
+		"jeq 1f\n\t"
+		"subql #1,%1\n"
+	    "91:\t"
+		"clrl %3@+\n"
+	    "92:\t"
+		"clrl %3@+\n"
+	    "93:\t"
+		"clrl %3@+\n"
+	    "94:\t"
+		"clrl %3@+\n"
+	    "95:\t"
+		"clrl %3@+\n"
+	    "96:\t"
+		"clrl %3@+\n"
+	    "97:\t"
+		"clrl %3@+\n"
+	    "98:\t"
+		"clrl %3@+\n\t"
+		"dbra %1,91b\n\t"
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 91b\n"
+	     "1:\t"
+		"movel %4,%1\n\t"
+		"andw #0x1c,%4\n\t"
+		"jeq 1f\n\t"
+		"lsrw #2,%4\n\t"
+		"subqw #1,%4\n"
+	    "99:\t"
+		"clrl %3@+\n\t"
+		"dbra %4,99b\n\t"
+	     "1:\t"
+		"andw #3,%1\n\t"
+		"jeq 9f\n"
+	   "100:\t"
+		"clrw %3@+\n\t"
+		"tstw %1\n\t"
+		"jeq 9f\n"
+	   "101:\t"
+		"clrb %3@+\n"
+	     "9:\t"
+#define STR(X) STR1(X)
+#define STR1(X) #X
+		"moveq #-" STR(EFAULT) ",%5\n\t"
+		"jra 8b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		".long 10b,90b\n"
+		".long 11b,91b\n"
+		".long 12b,92b\n"
+		".long 13b,93b\n"
+		".long 14b,94b\n"
+		".long 15b,95b\n"
+		".long 16b,96b\n"
+		".long 17b,97b\n"
+		".long 18b,98b\n"
+		".long 19b,99b\n"
+		".long 20b,100b\n"
+		".long 21b,101b\n"
+		".previous"
+		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
+		  "=&d" (tmp1), "=d" (tmp2)
+		: "0" (sum), "1" (len), "2" (src), "3" (dst)
+	    );
+
+	*csum_err = tmp2;
+
+	return(sum);
+}
+
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+
+/*
+ * copy from kernel space while checksumming, otherwise like csum_partial
+ */
+
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+{
+	unsigned long tmp1, tmp2;
+	__asm__("movel %2,%4\n\t"
+		"btst #1,%4\n\t"	/* Check alignment */
+		"jeq 2f\n\t"
+		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
+		"jgt 1f\n\t"
+		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
+		"jra 4f\n"
+	     "1:\t"
+		"movew %2@+,%4\n\t"	/* add first word to sum */
+		"addw %4,%0\n\t"
+		"movew %4,%3@+\n\t"
+		"clrl %4\n\t"
+		"addxl %4,%0\n"		/* add X bit */
+	     "2:\t"
+		/* unrolled loop for the main part: do 8 longs at once */
+		"movel %1,%4\n\t"	/* save len in tmp1 */
+		"lsrl #5,%1\n\t"	/* len/32 */
+		"jeq 2f\n\t"		/* not enough... */
+		"subql #1,%1\n"
+	     "1:\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %1,1b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n\t"	/* add X bit */
+		"clrw %1\n\t"
+		"subql #1,%1\n\t"
+		"jcc 1b\n"
+	     "2:\t"
+		"movel %4,%1\n\t"	/* restore len from tmp1 */
+		"andw #0x1c,%4\n\t"	/* number of rest longs */
+		"jeq 4f\n\t"
+		"lsrw #2,%4\n\t"
+		"subqw #1,%4\n"
+	     "3:\t"
+		/* loop for rest longs */
+		"movel %2@+,%5\n\t"
+		"addxl %5,%0\n\t"
+		"movel %5,%3@+\n\t"
+		"dbra %4,3b\n\t"
+		"clrl %5\n\t"
+		"addxl %5,%0\n"		/* add X bit */
+	     "4:\t"
+		/* now check for rest bytes that do not fit into longs */
+		"andw #3,%1\n\t"
+		"jeq 7f\n\t"
+		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
+		"subqw #2,%1\n\t"
+		"jlt 5f\n\t"
+		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
+		"movew %5,%3@+\n\t"
+		"swap %5\n\t"		/* into bits 16..31 */
+		"tstw %1\n\t"		/* another byte? */
+		"jeq 6f\n"
+	     "5:\t"
+		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
+		"moveb %5,%3@+\n\t"
+		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
+	     "6:\t"
+		"addl %5,%0\n\t"	/* now add rest long to sum */
+		"clrl %5\n\t"
+		"addxl %5,%0\n"		/* add X bit */
+	     "7:\t"
+		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
+		  "=&d" (tmp1), "=&d" (tmp2)
+		: "0" (sum), "1" (len), "2" (src), "3" (dst)
+	    );
+	return(sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c
new file mode 100644
index 00000000000..eccf25d3d73
--- /dev/null
+++ b/arch/m68k/lib/checksum_no.c
@@ -0,0 +1,153 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		IP/TCP/UDP checksumming routines
+ *
+ * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
+ *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *		Tom May, <ftom@netcom.com>
+ *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *		Fixed some nasty bugs, causing some horrible crashes.
+ *		A: At some points, the sum (%0) was used as
+ *		length-counter instead of the length counter
+ *		(%1). Thanks to Roman Hodek for pointing this out.
+ *		B: GCC seems to mess up if one uses too many
+ *		data-registers to hold input values and one tries to
+ *		specify d0 and d1 as scratch registers. Letting gcc choose these
+ *		registers itself solves the problem.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
+   of the assembly has to go. */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+static inline unsigned short from32to16(unsigned long x)
+{
+	/* add up 16-bit and 16-bit for 16+c bit */
+	x = (x & 0xffff) + (x >> 16);
+	/* add up carry.. */
+	x = (x & 0xffff) + (x >> 16);
+	return x;
+}
+
+static unsigned long do_csum(const unsigned char * buff, int len)
+{
+	int odd, count;
+	unsigned long result = 0;
+
+	if (len <= 0)
+		goto out;
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		result = *buff;
+		len--;
+		buff++;
+	}
+	count = len >> 1;		/* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1;		/* nr of 32-bit words.. */
+		if (count) {
+			unsigned long carry = 0;
+			do {
+				unsigned long w = *(unsigned long *) buff;
+				count--;
+				buff += 4;
+				result += carry;
+				result += w;
+				carry = (w > result);
+			} while (count);
+			result += carry;
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += (*buff << 8);
+	result = from32to16(result);
+	if (odd)
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+	return result;
+}
+
+#ifdef CONFIG_COLDFIRE
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	return (__force __sum16)~do_csum(iph,ihl*4);
+}
+#endif
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	unsigned int result = do_csum(buff, len);
+
+	/* add in old sum, and carry.. */
+	result += (__force u32)sum;
+	if ((__force u32)sum > result)
+		result += 1;
+	return (__force __wsum)result;
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum sum, int *csum_err)
+{
+	if (csum_err) *csum_err = 0;
+	memcpy(dst, (__force const void *)src, len);
+	return csum_partial(dst, len, sum);
+}
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+{
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
+}
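Note: checksum_no.c trades the assembly for portable C, since unaligned accesses fault on m68knommu parts: do_csum() accumulates 16-bit words into a 32-bit sum and from32to16() folds the accumulated carries back down to 16 bits. A tiny user-space harness, hypothetical and not part of this patch, makes the folding behaviour concrete:

/* Hypothetical user-space check of the folding arithmetic used by
 * from32to16()/do_csum() above; not part of the kernel build. */
#include <stdio.h>

static unsigned short fold16(unsigned long x)
{
	x = (x & 0xffff) + (x >> 16);	/* fold carries above bit 15... */
	x = (x & 0xffff) + (x >> 16);	/* ...twice, so the result fits in 16 bits */
	return (unsigned short)x;
}

int main(void)
{
	/* 0xffff + 0x0001 overflows 16 bits; one's-complement folding
	 * wraps the carry back around, giving 0x0001 rather than 0x0000. */
	printf("%#x\n", (unsigned)fold16(0xffffUL + 0x0001UL));	/* prints 0x1 */
	return 0;
}

The double fold matters: the first addition can itself carry into bit 16, and the second pass folds that carry back in, which is why from32to16() applies the same step twice.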
+ */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph,ihl*4); +} +#endif + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum) +{ + unsigned int result = do_csum(buff, len); + + /* add in old sum, and carry.. */ + result += (__force u32)sum; + if ((__force u32)sum > result) + result += 1; + return (__force __wsum)result; +} + +EXPORT_SYMBOL(csum_partial); + +/* + * copy from fs while checksumming, otherwise like csum_partial + */ + +__wsum +csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *csum_err) +{ + if (csum_err) *csum_err = 0; + memcpy(dst, (__force const void *)src, len); + return csum_partial(dst, len, sum); +} + +/* + * copy from ds while checksumming, otherwise like csum_partial + */ + +__wsum +csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) +{ + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} diff --git a/arch/m68k/lib/delay.c b/arch/m68k/lib/delay.c new file mode 100644 index 00000000000..5bd5472d38a --- /dev/null +++ b/arch/m68k/lib/delay.c @@ -0,0 +1,21 @@ +/* + * arch/m68knommu/lib/delay.c + * + * (C) Copyright 2004, Greg Ungerer <gerg@snapgear.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <asm/param.h> +#include <asm/delay.h> + |