/*
 * Network checksum routines
 *
 * Copyright (C) 1999, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * Most of the code comes from arch/alpha/lib/checksum.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/module.h>
#include <linux/string.h>

#include <asm/byteorder.h>

static inline unsigned short
from64to16 (unsigned long x)
{
	/* add up 32-bit words for 33 bits */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up 16-bit and 17-bit words for 17+c bits */
	x = (x & 0xffff) + (x >> 16);
	/* add up 16-bit and 2-bit for 16+c bit */
	x = (x & 0xffff) + (x >> 16);
	/* add up carry.. */
	x = (x & 0xffff) + (x >> 16);
	return x;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented.
 */
__sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return (__force __sum16)~from64to16(
		(__force u64)saddr + (__force u64)daddr +
		(__force u64)sum + ((len + proto) << 8));
}
EXPORT_SYMBOL(csum_tcpudp_magic);

__wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum)
{
	unsigned long result;

	result = (__force u64)saddr + (__force u64)daddr +
		 (__force u64)sum + ((len + proto) << 8);

	/* Fold down to 32 bits so we don't lose in the typedef-less network stack. */
	/* 64 to 33 */
	result = (result & 0xffffffff) + (result >> 32);
	/* 33 to 32 */
	result = (result & 0xffffffff) + (result >> 32);
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_tcpudp_nofold);

extern unsigned long do_csum (const unsigned char *, long);

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 result = do_csum(buff, len);

	/* add in old sum, and carry.. */
	result += (__force u32)sum;
	/* 32+c bits -> 32 bits */
	result = (result & 0xffffffff) + (result >> 32);
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum (const void *buff, int len)
{
	return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
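
/*
 * Illustrative sketch (not part of the kernel build, hence guarded out
 * with #if 0): a stand-alone user-space check that the folding steps in
 * from64to16() above agree with the plain "add the carry back in until
 * none is left" definition of a ones'-complement fold.  The helper names
 * local_from64to16() and ref_fold16() are invented for this example only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Same four folding steps as from64to16() above, duplicated for user space. */
static uint16_t local_from64to16(uint64_t x)
{
	x = (x & 0xffffffff) + (x >> 32);	/* 64 -> at most 33 bits */
	x = (x & 0xffff) + (x >> 16);		/* 33 -> at most 18 bits */
	x = (x & 0xffff) + (x >> 16);		/* 18 -> at most 17 bits */
	x = (x & 0xffff) + (x >> 16);		/* fold the final carry */
	return (uint16_t)x;
}

/* Reference fold: keep adding the carry back until the value fits in 16 bits. */
static uint16_t ref_fold16(uint64_t x)
{
	while (x >> 16)
		x = (x & 0xffff) + (x >> 16);
	return (uint16_t)x;
}

int main(void)
{
	const uint64_t samples[] = {
		0, 0xffff, 0x10000, 0x1fffe, 0xffffffffffffffffULL,
		0x123456789abcdef0ULL
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#018llx -> %#06x (reference %#06x)\n",
		       (unsigned long long)samples[i],
		       local_from64to16(samples[i]), ref_fold16(samples[i]));
	return 0;
}
#endif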
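
/*
 * Illustrative sketch (not part of the kernel build, hence guarded out
 * with #if 0): a stand-alone user-space walk-through of how a buffer
 * checksum (the role csum_partial()/do_csum() play here) is combined
 * with the TCP/UDP pseudo-header (the role csum_tcpudp_magic() plays).
 * The ref_* helpers, addresses, ports and payload below are made up for
 * the example and are not kernel symbols; the arithmetic is plain
 * RFC 1071 ones'-complement summation over big-endian 16-bit words.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum a byte buffer as 16-bit big-endian words, folded to 16 bits. */
static uint32_t ref_csum_buf(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;	/* pad an odd tail with zero */
	while (sum >> 16)				/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Pseudo-header contribution: source, destination, protocol and length. */
static uint32_t ref_pseudo_sum(uint32_t saddr, uint32_t daddr,
			       uint16_t len, uint8_t proto)
{
	uint32_t sum = (saddr >> 16) + (saddr & 0xffff) +
		       (daddr >> 16) + (daddr & 0xffff) +
		       proto + len;

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	/* 8-byte UDP header plus 4-byte payload, checksum field left zero. */
	uint8_t dgram[12] = {
		0x30, 0x39,		/* source port 12345 */
		0x00, 0x35,		/* destination port 53 */
		0x00, 0x0c,		/* UDP length 12 */
		0x00, 0x00,		/* checksum, filled in below */
		'p', 'i', 'n', 'g'	/* payload */
	};
	const uint32_t saddr = 0xc0a80001;	/* 192.168.0.1 */
	const uint32_t daddr = 0xc0a80002;	/* 192.168.0.2 */
	uint32_t sum, check, verify;

	/* Sender: sum pseudo-header and datagram, then complement. */
	sum = ref_csum_buf(dgram, sizeof(dgram),
			   ref_pseudo_sum(saddr, daddr, sizeof(dgram), 17));
	check = ~sum & 0xffff;
	dgram[6] = check >> 8;
	dgram[7] = check & 0xff;

	/* Receiver: re-summing everything, checksum included, gives all ones. */
	verify = ref_csum_buf(dgram, sizeof(dgram),
			      ref_pseudo_sum(saddr, daddr, sizeof(dgram), 17));
	printf("checksum %#06x, verify %#06x (expected 0xffff)\n",
	       (unsigned int)check, (unsigned int)verify);
	return 0;
}
#endif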