/*
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
 kills, so most of the assembly has to go. */

#include <linux/export.h>
#include <net/checksum.h>

#include <asm/byteorder.h>

#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
	/* add up 16-bit and 16-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}

static unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long) buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
#endif
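
/*
 * Worked example of the end-around-carry fold performed by from32to16()
 * above (the accumulator value 0x2ffff is illustrative only):
 *
 *	first fold:  (0x2ffff & 0xffff) + (0x2ffff >> 16) = 0xffff + 0x2 = 0x10001
 *	second fold: (0x10001 & 0xffff) + (0x10001 >> 16) = 0x0001 + 0x1 = 0x0002
 *
 * The second addition catches the carry that the first one can itself
 * produce, which is why the helper folds twice.
 */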

#ifndef ip_fast_csum
/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
	unsigned int sum = (__force unsigned int)wsum;
	unsigned int result = do_csum(buff, len);

	/* add in old sum, and carry.. */
	result += sum;
	if (sum > result)
		result += 1;
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);

/*
 * copy from fs while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
			    __wsum sum, int *csum_err)
{
	int missing;

	missing = __copy_from_user(dst, src, len);
	if (missing) {
		memset(dst + len - missing, 0, missing);
		*csum_err = -EFAULT;
	} else
		*csum_err = 0;

	return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);

/*
 * copy from ds while checksumming, otherwise like csum_partial
 */
__wsum
csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy);

#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
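
/*
 * Usage sketch (illustrative only): verifying a received IPv4/UDP packet
 * with the routines above, assuming the header and payload are already
 * contiguous in kernel memory and the UDP checksum field is non-zero.
 * ip_fast_csum() folds to 0 for an intact header; csum_tcpudp_magic()
 * (available through <net/checksum.h>) adds the pseudo-header to a
 * csum_partial() sum and likewise folds to 0 for a valid packet.
 *
 *	struct iphdr *iph = ...;
 *	struct udphdr *uh = (void *)iph + iph->ihl * 4;
 *	unsigned int ulen = ntohs(uh->len);
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto drop;
 *	if (csum_tcpudp_magic(iph->saddr, iph->daddr, ulen, IPPROTO_UDP,
 *			      csum_partial(uh, ulen, 0)))
 *		goto drop;
 */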