2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * IP/TCP/UDP checksumming routines
8 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
9 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
10 * Tom May, <ftom@netcom.com>
11 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
12 * Lots of code moved from tcp.c and ip.c; see those files
15 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
16 * Fixed some nasty bugs, causing some horrible crashes.
17 * A: At some points, the sum (%0) was used as
18 * length-counter instead of the length counter
19 * (%1). Thanks to Roman Hodek for pointing this out.
20 * B: GCC seems to mess up if one uses too many
21 * data-registers to hold input values and one tries to
22 * specify d0 and d1 as scratch registers. Letting gcc
23 * choose these registers itself solves the problem.
25 * This program is free software; you can redistribute it and/or
26 * modify it under the terms of the GNU General Public License
27 * as published by the Free Software Foundation; either version
28 * 2 of the License, or (at your option) any later version.
30 * 1998/8/31 Andreas Schwab:
31 * Zero out rest of buffer on exception in
32 * csum_partial_copy_from_user.
35 #include <net/checksum.h>
38 * computes a partial checksum, e.g. for TCP/UDP fragments
/*
 * csum_partial -- fold `len` bytes at `buff` into the running 32-bit
 * one's-complement `sum` (Internet checksum accumulator), m68k asm version.
 *
 * Strategy visible in the asm below: align `buff` to a 4-byte boundary by
 * consuming one leading word if needed, sum 32 bytes (8 longs) per
 * iteration of an unrolled loop, then handle the remaining longs, the
 * trailing word, and a final odd byte.  Carries are folded back in with
 * `addxl` (add-with-extend of the X flag).
 *
 * Operands: %0 = sum (data reg, in/out), %1 = len (data reg, in/out),
 * %2 = buff (address reg, post-incremented), %3/%4 = tmp1/tmp2 scratch.
 *
 * NOTE(review): this file appears to be a damaged extraction -- the stray
 * leading numbers on each line and the gaps in them (e.g. 64 -> 85) show
 * that interior asm lines (the unrolled loop body, labels, the `return`)
 * are missing.  Restore from the original arch/m68k/lib/checksum.c before
 * attempting to build; do not hand-patch the asm from this fragment.
 */
42 csum_partial (const unsigned char *buff, int len, unsigned int sum)
44 unsigned long tmp1, tmp2;
46 * Experiments with ethernet and slip connections show that buff
47 * is aligned on either a 2-byte or 4-byte boundary.
49 __asm__("movel %2,%3\n\t"
50 "btst #1,%3\n\t" /* Check alignment */
52 "subql #2,%1\n\t" /* buff%4==2: treat first word */
54 "addql #2,%1\n\t" /* len was == 2, treat only rest */
57 "addw %2@+,%0\n\t" /* add first word to sum */
59 "addxl %3,%0\n" /* add X bit */
61 /* unrolled loop for the main part: do 8 longs at once */
62 "movel %1,%3\n\t" /* save len in tmp1 */
63 "lsrl #5,%1\n\t" /* len/32 */
64 "jeq 2f\n\t" /* not enough... */
85 "addxl %4,%0\n\t" /* add X bit */
90 "movel %3,%1\n\t" /* restore len from tmp1 */
91 "andw #0x1c,%3\n\t" /* number of rest longs */
96 /* loop for rest longs */
101 "addxl %4,%0\n" /* add X bit */
103 /* now check for rest bytes that do not fit into longs */
106 "clrl %4\n\t" /* clear tmp2 for rest bytes */
109 "movew %2@+,%4\n\t" /* have rest >= 2: get word */
110 "swap %4\n\t" /* into bits 16..31 */
111 "tstw %1\n\t" /* another byte? */
114 "moveb %2@,%4\n\t" /* have odd rest: get byte */
115 "lslw #8,%4\n\t" /* into bits 8..15; 16..31 untouched */
117 "addl %4,%0\n\t" /* now add rest long to sum */
119 "addxl %4,%0\n" /* add X bit */
/* Output/input operands: sum and len must be data regs, buff an address
   reg (for the @+ post-increment addressing); tmp1/tmp2 are early-clobber
   data-reg scratch -- see the header note about letting gcc pick them. */
121 : "=d" (sum), "=d" (len), "=a" (buff),
122 "=&d" (tmp1), "=&d" (tmp2)
123 : "0" (sum), "1" (len), "2" (buff)
131 * copy from user space while checksumming, with exception handling.
/*
 * csum_partial_copy_from_user -- copy `len` bytes from user space (`src`)
 * to kernel space (`dst`) while folding them into the checksum `sum`,
 * with exception handling for faulting user accesses.
 *
 * Same alignment / unrolled-loop structure as csum_partial, but user
 * reads go through `moves` (movesw/movesb), the privileged
 * alternate-address-space move, so faults can be caught via the
 * .fixup/__ex_table sections.  Per the changelog at the top of the file,
 * the fixup path zeroes out the rest of the destination buffer and
 * returns -EFAULT through *csum_err (tmp2/%5 doubles as the error code,
 * since gcc rejects more than 10 asm operands -- see comment below).
 *
 * NOTE(review): damaged extraction -- the embedded line numbers jump
 * (163 -> 200, 247 -> 293, 298 -> 312), so the unrolled loop body, the
 * fixup code, the exception-table entries, the STR1 macro definition and
 * the function epilogue are all missing here.  Restore from the original
 * arch/m68k/lib/checksum.c; this fragment is not buildable as-is.
 */
135 csum_partial_copy_from_user(const char *src, char *dst, int len,
136 int sum, int *csum_err)
139 * GCC doesn't like more than 10 operands for the asm
140 * statements so we have to use tmp2 for the error
143 unsigned long tmp1, tmp2;
145 __asm__("movel %2,%4\n\t"
146 "btst #1,%4\n\t" /* Check alignment */
148 "subql #2,%1\n\t" /* buff%4==2: treat first word */
150 "addql #2,%1\n\t" /* len was == 2, treat only rest */
154 "movesw %2@+,%4\n\t" /* add first word to sum */
158 "addxl %4,%0\n" /* add X bit */
160 /* unrolled loop for the main part: do 8 longs at once */
161 "movel %1,%4\n\t" /* save len in tmp1 */
162 "lsrl #5,%1\n\t" /* len/32 */
163 "jeq 2f\n\t" /* not enough... */
200 "addxl %5,%0\n\t" /* add X bit */
205 "movel %4,%1\n\t" /* restore len from tmp1 */
206 "andw #0x1c,%4\n\t" /* number of rest longs */
211 /* loop for rest longs */
218 "addxl %5,%0\n" /* add X bit */
220 /* now check for rest bytes that do not fit into longs */
223 "clrl %5\n\t" /* clear tmp2 for rest bytes */
227 "movesw %2@+,%5\n\t" /* have rest >= 2: get word */
229 "swap %5\n\t" /* into bits 16..31 */
230 "tstw %1\n\t" /* another byte? */
234 "movesb %2@,%5\n\t" /* have odd rest: get byte */
236 "lslw #8,%5\n\t" /* into bits 8..15; 16..31 untouched */
238 "addl %5,%0\n\t" /* now add rest long to sum */
240 "addxl %5,%0\n\t" /* add X bit */
242 "clrl %5\n" /* no error - clear return value */
/* Exception fixup: if a user access above faults, control resumes here
   (via __ex_table) to zero the remaining dst bytes and set -EFAULT. */
244 ".section .fixup,\"ax\"\n"
246 /* If any execption occurs zero out the rest.
247 Similarities with the code above are intentional :-) */
293 #define STR(X) STR1(X)
295 "moveq #-" STR(EFAULT) ",%5\n\t"
298 ".section __ex_table,\"a\"\n"
/* %0=sum, %1=len, %2=src (user, a-reg), %3=dst (kernel, a-reg),
   %4=tmp1 scratch, %5=tmp2 (scratch + error code out). */
312 : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
313 "=&d" (tmp1), "=d" (tmp2)
314 : "0" (sum), "1" (len), "2" (src), "3" (dst)
323 * copy from kernel space while checksumming, otherwise like csum_partial
/*
 * csum_partial_copy -- copy `len` bytes from kernel `src` to kernel `dst`
 * while folding them into the checksum `sum`.  Same structure as
 * csum_partial_copy_from_user but uses plain movew/moveb instead of the
 * user-space `moves` forms, and needs no exception fixup tables.
 *
 * Operands: %0=sum, %1=len (data regs), %2=src, %3=dst (address regs,
 * post-incremented), %4/%5 = tmp1/tmp2 early-clobber scratch.
 *
 * NOTE(review): damaged extraction -- embedded line numbers jump
 * (347 -> 376 etc.), so the unrolled 8-long copy loop, labels, braces and
 * the return statement are missing from this fragment.  Restore from the
 * original arch/m68k/lib/checksum.c before building.
 */
327 csum_partial_copy(const char *src, char *dst, int len, int sum)
329 unsigned long tmp1, tmp2;
330 __asm__("movel %2,%4\n\t"
331 "btst #1,%4\n\t" /* Check alignment */
333 "subql #2,%1\n\t" /* buff%4==2: treat first word */
335 "addql #2,%1\n\t" /* len was == 2, treat only rest */
338 "movew %2@+,%4\n\t" /* add first word to sum */
342 "addxl %4,%0\n" /* add X bit */
344 /* unrolled loop for the main part: do 8 longs at once */
345 "movel %1,%4\n\t" /* save len in tmp1 */
346 "lsrl #5,%1\n\t" /* len/32 */
347 "jeq 2f\n\t" /* not enough... */
376 "addxl %5,%0\n\t" /* add X bit */
381 "movel %4,%1\n\t" /* restore len from tmp1 */
382 "andw #0x1c,%4\n\t" /* number of rest longs */
387 /* loop for rest longs */
393 "addxl %5,%0\n" /* add X bit */
395 /* now check for rest bytes that do not fit into longs */
398 "clrl %5\n\t" /* clear tmp2 for rest bytes */
401 "movew %2@+,%5\n\t" /* have rest >= 2: get word */
403 "swap %5\n\t" /* into bits 16..31 */
404 "tstw %1\n\t" /* another byte? */
407 "moveb %2@,%5\n\t" /* have odd rest: get byte */
409 "lslw #8,%5\n" /* into bits 8..15; 16..31 untouched */
411 "addl %5,%0\n\t" /* now add rest long to sum */
413 "addxl %5,%0\n" /* add X bit */
415 : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
416 "=&d" (tmp1), "=&d" (tmp2)
417 : "0" (sum), "1" (len), "2" (src), "3" (dst)