1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle 7 * Copyright (C) 1999 Silicon Graphics, Inc. 8 * Copyright (C) 2001 Thiemo Seufer. 9 * Copyright (C) 2002 Maciej W. Rozycki 10 * Copyright (C) 2014 Imagination Technologies Ltd. 11 */ 12 #ifndef _ASM_CHECKSUM_H 13 #define _ASM_CHECKSUM_H 14 15 #ifdef CONFIG_GENERIC_CSUM 16 #include <asm-generic/checksum.h> 17 #else 18 19 #include <linux/in6.h> 20 21 #include <linux/uaccess.h> 22 23 /* 24 * computes the checksum of a memory block at buff, length len, 25 * and adds in "sum" (32-bit) 26 * 27 * returns a 32-bit number suitable for feeding into itself 28 * or csum_tcpudp_magic 29 * 30 * this function must be called with even lengths, except 31 * for the last fragment, which may be odd 32 * 33 * it's best to have buff aligned on a 32-bit boundary 34 */ 35 __wsum csum_partial(const void *buff, int len, __wsum sum); 36 37 __wsum __csum_partial_copy_kernel(const void *src, void *dst, 38 int len, __wsum sum, int *err_ptr); 39 40 __wsum __csum_partial_copy_from_user(const void *src, void *dst, 41 int len, __wsum sum, int *err_ptr); 42 __wsum __csum_partial_copy_to_user(const void *src, void *dst, 43 int len, __wsum sum, int *err_ptr); 44 /* 45 * this is a new version of the above that records errors it finds in *errp, 46 * but continues and zeros the rest of the buffer. 
47 */ 48 static inline 49 __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, 50 __wsum sum, int *err_ptr) 51 { 52 might_fault(); 53 if (uaccess_kernel()) 54 return __csum_partial_copy_kernel((__force void *)src, dst, 55 len, sum, err_ptr); 56 else 57 return __csum_partial_copy_from_user((__force void *)src, dst, 58 len, sum, err_ptr); 59 } 60 61 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 62 static inline 63 __wsum csum_and_copy_from_user(const void __user *src, void *dst, 64 int len, __wsum sum, int *err_ptr) 65 { 66 if (access_ok(VERIFY_READ, src, len)) 67 return csum_partial_copy_from_user(src, dst, len, sum, 68 err_ptr); 69 if (len) 70 *err_ptr = -EFAULT; 71 72 return sum; 73 } 74 75 /* 76 * Copy and checksum to user 77 */ 78 #define HAVE_CSUM_COPY_USER 79 static inline 80 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, 81 __wsum sum, int *err_ptr) 82 { 83 might_fault(); 84 if (access_ok(VERIFY_WRITE, dst, len)) { 85 if (uaccess_kernel()) 86 return __csum_partial_copy_kernel(src, 87 (__force void *)dst, 88 len, sum, err_ptr); 89 else 90 return __csum_partial_copy_to_user(src, 91 (__force void *)dst, 92 len, sum, err_ptr); 93 } 94 if (len) 95 *err_ptr = -EFAULT; 96 97 return (__force __wsum)-1; /* invalid checksum */ 98 } 99 100 /* 101 * the same as csum_partial, but copies from user space (but on MIPS 102 * we have just one address space, so this is identical to the above) 103 */ 104 __wsum csum_partial_copy_nocheck(const void *src, void *dst, 105 int len, __wsum sum); 106 #define csum_partial_copy_nocheck csum_partial_copy_nocheck 107 108 /* 109 * Fold a partial checksum without adding pseudo headers 110 */ 111 static inline __sum16 csum_fold(__wsum csum) 112 { 113 u32 sum = (__force u32)csum;; 114 115 sum += (sum << 16); 116 csum = (sum < csum); 117 sum >>= 16; 118 sum += csum; 119 120 return (__force __sum16)~sum; 121 } 122 #define csum_fold csum_fold 123 124 /* 125 * This is a version of ip_compute_csum() 
 * optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	const unsigned int *word = iph;
	const unsigned int *stop = word + ihl;
	unsigned int csum;
	int carry;

	/*
	 * Checksum ihl 32-bit words: the first four are unrolled, the
	 * rest go through the loop below.  Each add is followed by an
	 * explicit carry (sltu-style) propagation.
	 *
	 * NOTE(review): the do-while body runs at least once, so this
	 * requires ihl >= 5 (true for any valid IPv4 header); ihl == 4
	 * would read one word past the header — confirm callers.
	 */
	csum = word[0];
	csum += word[1];
	carry = (csum < word[1]);
	csum += carry;

	csum += word[2];
	carry = (csum < word[2]);
	csum += carry;

	csum += word[3];
	carry = (csum < word[3]);
	csum += carry;

	word += 4;
	do {
		csum += *word;
		carry = (csum < *word);
		csum += carry;
		word++;
	} while (word != stop);

	return csum_fold(csum);
}
#define ip_fast_csum ip_fast_csum

/*
 * Sum the TCP/UDP pseudo-header (source/destination address, protocol
 * and length) into a partial checksum, without folding.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__(
	"	.set	push		# csum_tcpudp_nofold\n"
	"	.set	noat		\n"
#ifdef CONFIG_32BIT
	/* 32-bit: three adds with explicit carry propagation via $1. */
	"	addu	%0, %2		\n"
	"	sltu	$1, %0, %2	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %3		\n"
	"	sltu	$1, %0, %3	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %4		\n"
	"	sltu	$1, %0, %4	\n"
	"	addu	%0, $1		\n"
#endif
#ifdef CONFIG_64BIT
	/*
	 * 64-bit: accumulate in 64 bits, then fold the high half into
	 * the low half (dsll32/daddu/dsra32) and add the final carry.
	 */
	"	daddu	%0, %2		\n"
	"	daddu	%0, %3		\n"
	"	daddu	%0, %4		\n"
	"	dsll32	$1, %0, 0	\n"
	"	daddu	%0, $1		\n"
	"	sltu	$1, %0, $1	\n"
	"	dsra32	%0, %0, 0	\n"
	"	addu	%0, $1		\n"
#endif
	"	.set	pop"
	: "=r" (sum)
	: "0" ((__force unsigned long)daddr),
	  "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
	/*
	 * NOTE(review): on little-endian the proto+len word is shifted
	 * up a byte — presumably to line up with network byte order;
	 * confirm against the big-endian variant below.
	 */
	  "r" ((proto + len) << 8),
#else
	  "r" (proto + len),
#endif
	  "r" ((__force unsigned long)sum));

	return sum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
/*
 * Compute the folded checksum of the IPv6 pseudo-header: both 128-bit
 * addresses plus length and protocol (in network byte order), added to
 * an existing partial sum.
 */
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	__wsum tmp;

	/*
	 * Operands: %0 = sum (starts as htonl(len), tied via "0"),
	 * %1 = tmp scratch, %2 = saddr, %3 = daddr, %5 = htonl(proto),
	 * %6 = incoming sum.  Each lw/addu pair is interleaved with the
	 * carry (sltu) of the previous add to hide the load latency.
	 */
	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6	\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 0(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	addu	%0, $1		# Add final carry\n"
	"	.set	pop"
	: "=&r" (sum), "=&r" (tmp)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));

	return csum_fold(sum);
}

#include <asm-generic/checksum.h>
#endif /* CONFIG_GENERIC_CSUM */

#endif /* _ASM_CHECKSUM_H */