patch-2.1.107 linux/arch/i386/lib/checksum.c

diff -u --recursive --new-file v2.1.106/linux/arch/i386/lib/checksum.c linux/arch/i386/lib/checksum.c
@@ -33,9 +33,9 @@
 
 unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) {
 	  /*
-	   * Experiments with ethernet and slip connections show that buff
+	   * Experiments with Ethernet and SLIP connections show that buff
 	   * is aligned on either a 2-byte or 4-byte boundary.  We get at
-	   * least a 2x speedup on 486 and Pentium if it is 4-byte aligned.
+	   * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 	   * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 	   * alignment for the unrolled loop.
 	   */
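
The alignment conversion the comment describes is simple enough to sketch
in plain C: when buff is 2-byte but not 4-byte aligned, fold in a single
16-bit word by hand and advance the pointer, after which the unrolled loop
can assume a 4-byte boundary.  The sketch below is illustrative only; the
function name and the explicit carry check are not from the kernel source:

    /* Fold in one 16-bit word when buff is 2-byte but not 4-byte
     * aligned, so the unrolled loop can assume a 4-byte boundary. */
    static unsigned int align_fixup_sketch(const unsigned char **buff,
                                           int *len, unsigned int sum)
    {
            if (((unsigned long)*buff & 2) && *len >= 2) {
                    unsigned short w = *(const unsigned short *)*buff;

                    sum += w;
                    if (sum < w)
                            sum++;          /* end-around carry */
                    *buff += 2;
                    *len  -= 2;
            }
            return sum;     /* caller continues with the aligned loop */
    }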
@@ -118,7 +118,7 @@
             negl %%ebx
             lea 45f(%%ebx,%%ebx,2), %%ebx
             testl %%esi, %%esi
-            jmp %%ebx
+            jmp *%%ebx
 
             # Handle 2-byte-aligned regions
 20:         addw (%%esi), %%ax
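
The one-character change above is the point of this hunk: in AT&T syntax
an indirect jump through a register must be written "jmp *%ebx"; without
the '*' the operand is not marked as an indirect target, and GNU as at
best accepts the bare form with a warning (stricter assemblers reject it).
The lea just before it scales the remaining count by the size of one
unrolled step to pick an entry point inside the loop.  A C analogue of
the pattern, using GCC's labels-as-values extension (names illustrative,
carry handling omitted):

    /* Computed jump into an unrolled tail, the C way: GCC's computed
     * goto compiles down to exactly a "jmp *%reg". */
    static unsigned int sum_tail_sketch(const unsigned short *p, int words,
                                        unsigned int sum)
    {
            static void *const entry[4] = { &&w0, &&w1, &&w2, &&w3 };

            goto *entry[words & 3];
    w3:     sum += p[2];
    w2:     sum += p[1];
    w1:     sum += p[0];
    w0:     return sum;
    }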
@@ -203,9 +203,9 @@
  * The macros SRC and DST specify the type of access for the instruction.
  * thus we can call a custom exception handler for all access types.
  *
- * FIXME: could someone double check wether i havent mixed up some SRC and
- *	  DST definitions? It's damn hard to trigger all cases, i hope i got
- *	  them all but theres no guarantee ...
+ * FIXME: could someone double-check whether I haven't mixed up some SRC and
+ *	  DST definitions? It's damn hard to trigger all cases.  I hope I got
+ *	  them all but there's no guarantee.
  */
 
 #define SRC(y...)			\
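
The SRC() and DST() macros named in the comment (their bodies fall outside
this hunk) wrap each memory access with an entry in the kernel's exception
table: if the tagged instruction faults, the trap handler looks up the
faulting EIP in the table and resumes at a fixup label instead of oopsing,
which is what lets one routine safely touch user-space buffers.  On i386
an exception-table entry is a pair of addresses; the macro below is only a
sketch of the emission pattern, with invented names and label numbers, not
the file's actual definition:

    /* i386 exception-table entry: if the instruction at 'insn' faults,
     * execution resumes at 'fixup'. */
    struct exception_table_entry {
            unsigned long insn;
            unsigned long fixup;
    };

    /* Sketch: emit one tagged instruction plus its table entry from
     * within an inline-asm string (label numbers illustrative). */
    #define SRC_SKETCH(y...)                        \
            "9999: " #y ";\n\t"                     \
            ".section __ex_table,\"a\"\n\t"         \
            ".long 9999b, 6001f\n\t"                \
            ".previous\n\t"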
@@ -369,7 +369,7 @@
         subl %%ebx, %%edi  
         lea 3f(%%ebx,%%ebx), %%ebx
         testl %%esi, %%esi 
-        jmp %%ebx         
+        jmp *%%ebx         
 1:      addl $64,%%esi
         addl $64,%%edi\n" 
 ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
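
This is the same indirect-jump fix, applied here to csum_partial_copy's
unrolled copy-and-sum loop (the ROUND macros are its unrolled steps).
What the routine computes is easy to state in portable C; the sketch
below is illustrative only (the patch itself touches nothing but the jump
syntax), assumes little-endian byte order, and ignores the exception-table
machinery discussed above:

    /* One pass that both copies and accumulates the 16-bit one's-
     * complement sum, as csum_partial_copy does (names illustrative). */
    static unsigned int copy_and_csum_sketch(const unsigned char *src,
                                             unsigned char *dst, int len,
                                             unsigned int sum)
    {
            int i;

            for (i = 0; i + 1 < len; i += 2) {
                    unsigned int w = src[i] | ((unsigned int)src[i + 1] << 8);

                    dst[i]     = src[i];
                    dst[i + 1] = src[i + 1];
                    sum += w;
                    if (sum < w)
                            sum++;          /* end-around carry */
            }
            if (i < len) {                  /* odd trailing byte */
                    dst[i] = src[i];
                    sum += src[i];
                    if (sum < (unsigned int)src[i])
                            sum++;
            }
            return sum;
    }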
