patch-2.4.10 linux/arch/sh/lib/checksum.S
Next file: linux/arch/sh/lib/strlen.S
Previous file: linux/arch/sh/lib/Makefile
Back to the patch index
Back to the overall index
- Lines: 189
- Date: Sat Sep 8 12:29:09 2001
- Orig file: v2.4.9/linux/arch/sh/lib/checksum.S
- Orig date: Sun Jan 28 18:56:00 2001
diff -u --recursive --new-file v2.4.9/linux/arch/sh/lib/checksum.S linux/arch/sh/lib/checksum.S
@@ -1,4 +1,4 @@
-/* $Id: checksum.S,v 1.4 2000/05/14 08:41:26 gniibe Exp $
+/* $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $
*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -54,13 +54,14 @@
tst #2, r0 ! Check alignment.
bt 2f ! Jump if alignment is ok.
!
- add #-2, r5 ! Alignment uses up two bytes.
+ add #-2, r5 ! Alignment uses up two bytes.
cmp/pz r5 !
bt/s 1f ! Jump if we had at least two bytes.
clrt
bra 6f
add #2, r5 ! r5 was < 2. Deal with it.
1:
+ mov r5, r1 ! Save new len for later use.
mov.w @r4+, r0
extu.w r0, r0
addc r0, r6
@@ -90,7 +91,7 @@
addc r3, r6
addc r0, r6
addc r2, r6
- movt r0
+ movt r0
dt r5
bf/s 3b
cmp/eq #1, r0
@@ -185,48 +186,48 @@
mov.l r5,@-r15
mov.l r6,@-r15
- mov #3, r0 ! Check src and dest are equally aligned
- mov r4, r1
- and r0, r1
- and r5, r0
- cmp/eq r1, r0
- bf 3f ! Different alignments, use slow version
- tst #1,r0 ! Check dest word aligned
- bf 3f ! If not, do it the slow way
+ mov #3,r0 ! Check src and dest are equally aligned
+ mov r4,r1
+ and r0,r1
+ and r5,r0
+ cmp/eq r1,r0
+ bf 3f ! Different alignments, use slow version
+ tst #1,r0 ! Check dest word aligned
+ bf 3f ! If not, do it the slow way
mov #2,r0
- tst r0,r5 ! Check dest alignment.
- bt 2f ! Jump if alignment is ok.
- add #-2,r6 ! Alignment uses up two bytes.
- cmp/pz r6 ! Jump if we had at least two bytes.
+ tst r0,r5 ! Check dest alignment.
+ bt 2f ! Jump if alignment is ok.
+ add #-2,r6 ! Alignment uses up two bytes.
+ cmp/pz r6 ! Jump if we had at least two bytes.
bt/s 1f
clrt
bra 4f
- add #2,r6 ! r6 was < 2. Deal with it.
+ add #2,r6 ! r6 was < 2. Deal with it.
-3: ! Handle different src and dest alinments.
+3: ! Handle different src and dest alignments.
! This is not common, so simple byte by byte copy will do.
- mov r6, r2
+ mov r6,r2
shlr r6
- tst r6, r6
+ tst r6,r6
bt 4f
clrt
-5:
-SRC( mov.b @r4+,r0 )
-DST( mov.b r0,@r5 )
- add #1, r5
-SRC( mov.b @r4+,r1 )
-DST( mov.b r1,@r5 )
- add #1,r5
-
- extu.b r0,r0
+ .align 2
+5:
+SRC( mov.b @r4+,r1 )
+SRC( mov.b @r4+,r0 )
extu.b r1,r1
+DST( mov.b r1,@r5 )
+DST( mov.b r0,@(1,r5) )
+ extu.b r0,r0
+ add #2,r5
+
#ifdef __LITTLE_ENDIAN__
- shll8 r1
-#else
shll8 r0
+#else
+ shll8 r1
#endif
- or r1,r0
+ or r1,r0
addc r0,r7
movt r0
@@ -238,16 +239,16 @@
mov r2, r0
tst #1, r0
- bt 7f
+ bt 7f
bra 5f
clrt
! src and dest equally aligned, but to a two byte boundary.
! Handle first two bytes as a special case
- .align 5
+ .align 2
1:
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+SRC( mov.w @r4+,r0 )
+DST( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
addc r0,r7
@@ -260,42 +261,36 @@
tst r6,r6
bt/s 2f
clrt
+ .align 2
1:
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+SRC( mov.l @r4+,r0 )
+SRC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
- add #4,r5
+DST( mov.l r0,@r5 )
+DST( mov.l r1,@(4,r5) )
addc r1,r7
-DST( mov.l r1,@r5 )
- add #4,r5
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+SRC( mov.l @r4+,r0 )
+SRC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
- add #4,r5
+DST( mov.l r0,@(8,r5) )
+DST( mov.l r1,@(12,r5) )
addc r1,r7
-DST( mov.l r1,@r5 )
- add #4,r5
SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+SRC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
- add #4,r5
+DST( mov.l r0,@(16,r5) )
+DST( mov.l r1,@(20,r5) )
addc r1,r7
-DST( mov.l r1,@r5 )
- add #4,r5
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+SRC( mov.l @r4+,r0 )
+SRC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
- add #4,r5
+DST( mov.l r0,@(24,r5) )
+DST( mov.l r1,@(28,r5) )
addc r1,r7
-DST( mov.l r1,@r5 )
- add #4,r5
+ add #32,r5
movt r0
dt r6
bf/s 1b
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)