xref: /openbmc/linux/arch/alpha/lib/checksum.c (revision 547c178b)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * arch/alpha/lib/checksum.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * This file contains network checksum routines that are better done
51da177e4SLinus Torvalds  * in an architecture-specific manner due to speed..
61da177e4SLinus Torvalds  * Comments in other versions indicate that the algorithms are from RFC1071
71da177e4SLinus Torvalds  *
 * accelerated versions (and 21264 assembly versions) contributed by
91da177e4SLinus Torvalds  *	Rick Gorton	<rick.gorton@alpha-processor.com>
101da177e4SLinus Torvalds  */
111da177e4SLinus Torvalds 
121da177e4SLinus Torvalds #include <linux/module.h>
131da177e4SLinus Torvalds #include <linux/string.h>
141da177e4SLinus Torvalds 
151da177e4SLinus Torvalds #include <asm/byteorder.h>
161da177e4SLinus Torvalds 
/*
 * Fold a 64-bit partial checksum down to 16 bits (without the final
 * complement).  Going through a union of differently-sized views lets
 * the compiler use the Alpha extract instructions, which is a bit more
 * efficient than the portable shift/bitmask version.
 */
static inline unsigned short from64to16(unsigned long x)
{
	union {
		unsigned long	whole;
		unsigned int	half[2];
		unsigned short	quarter[4];
	} a, b, c;

	/* 64 -> 33 bits: add the two 32-bit halves.  */
	a.whole = x;
	b.whole = (unsigned long) a.half[0] + (unsigned long) a.half[1];

	/* b.quarter[3] is necessarily zero after the step above, so it
	   can be left out of this sum.  */
	c.whole = (unsigned long) b.quarter[0]
			+ (unsigned long) b.quarter[1]
			+ (unsigned long) b.quarter[2];

	/* Likewise c.quarter[2] is zero for the final add.  */
	return c.quarter[0] + c.quarter[1];
}
391da177e4SLinus Torvalds 
401da177e4SLinus Torvalds /*
411da177e4SLinus Torvalds  * computes the checksum of the TCP/UDP pseudo-header
421da177e4SLinus Torvalds  * returns a 16-bit checksum, already complemented.
431da177e4SLinus Torvalds  */
449be259aaSAl Viro __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
451da177e4SLinus Torvalds 				   unsigned short len,
461da177e4SLinus Torvalds 				   unsigned short proto,
479be259aaSAl Viro 				   __wsum sum)
481da177e4SLinus Torvalds {
499be259aaSAl Viro 	return (__force __sum16)~from64to16(
509be259aaSAl Viro 		(__force u64)saddr + (__force u64)daddr +
519be259aaSAl Viro 		(__force u64)sum + ((len + proto) << 8));
521da177e4SLinus Torvalds }
531da177e4SLinus Torvalds 
549be259aaSAl Viro __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
551da177e4SLinus Torvalds 				   unsigned short len,
561da177e4SLinus Torvalds 				   unsigned short proto,
579be259aaSAl Viro 				   __wsum sum)
581da177e4SLinus Torvalds {
591da177e4SLinus Torvalds 	unsigned long result;
601da177e4SLinus Torvalds 
619be259aaSAl Viro 	result = (__force u64)saddr + (__force u64)daddr +
629be259aaSAl Viro 		 (__force u64)sum + ((len + proto) << 8);
631da177e4SLinus Torvalds 
641da177e4SLinus Torvalds 	/* Fold down to 32-bits so we don't lose in the typedef-less
651da177e4SLinus Torvalds 	   network stack.  */
661da177e4SLinus Torvalds 	/* 64 to 33 */
671da177e4SLinus Torvalds 	result = (result & 0xffffffff) + (result >> 32);
681da177e4SLinus Torvalds 	/* 33 to 32 */
691da177e4SLinus Torvalds 	result = (result & 0xffffffff) + (result >> 32);
709be259aaSAl Viro 	return (__force __wsum)result;
711da177e4SLinus Torvalds }
72547c178bSAl Viro EXPORT_SYMBOL(csum_tcpudp_nofold);
731da177e4SLinus Torvalds 
/*
 * Do a 64-bit checksum on an arbitrary memory area..
 *
 * This isn't a great routine, but it's not _horrible_ either. The
 * inner loop could be unrolled a bit further, and there are better
 * ways to do the carry, but this is reasonable.
 *
 * Works at any alignment: a leading odd byte, then 16-bit and 32-bit
 * alignment steps, then 64 bits at a time, then the leftover tail.
 * Returns the sum folded to 16 bits (not complemented).
 */
static inline unsigned long do_csum(const unsigned char * buff, int len)
{
	int odd, count;
	unsigned long result = 0;

	if (len <= 0)
		goto out;
	/* An odd starting address: consume one byte so the rest of the
	   walk is on even addresses.  The byte goes into the high half
	   of a 16-bit word; the final swap below compensates.  */
	odd = 1 & (unsigned long) buff;
	if (odd) {
		result = *buff << 8;
		len--;
		buff++;
	}
	count = len >> 1;		/* nr of 16-bit words.. */
	if (count) {
		/* Reach 4-byte alignment by eating one 16-bit word.  */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			count--;
			len -= 2;
			buff += 2;
		}
		count >>= 1;		/* nr of 32-bit words.. */
		if (count) {
			/* Reach 8-byte alignment by eating one 32-bit word.  */
			if (4 & (unsigned long) buff) {
				result += *(unsigned int *) buff;
				count--;
				len -= 4;
				buff += 4;
			}
			count >>= 1;	/* nr of 64-bit words.. */
			if (count) {
				/* Main loop: 64 bits at a time with manual
				   carry propagation.  An unsigned add has
				   wrapped iff the result is smaller than an
				   addend, so the compare must come after
				   "result += w".  */
				unsigned long carry = 0;
				do {
					unsigned long w = *(unsigned long *) buff;
					count--;
					buff += 8;
					result += carry;
					result += w;
					carry = (w > result);
				} while (count);
				result += carry;
				/* Pre-fold to 33 bits so the tail adds
				   below cannot overflow 64 bits.  */
				result = (result & 0xffffffff) + (result >> 32);
			}
			/* Trailing 32-bit word, if any.  */
			if (len & 4) {
				result += *(unsigned int *) buff;
				buff += 4;
			}
		}
		/* Trailing 16-bit word, if any.  */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* Trailing odd byte, if any.  */
	if (len & 1)
		result += *buff;
	result = from64to16(result);
	/* Undo the byte shift introduced by an odd starting address.  */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
1421da177e4SLinus Torvalds 
1431da177e4SLinus Torvalds /*
1441da177e4SLinus Torvalds  *	This is a version of ip_compute_csum() optimized for IP headers,
1451da177e4SLinus Torvalds  *	which always checksum on 4 octet boundaries.
1461da177e4SLinus Torvalds  */
1479be259aaSAl Viro __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
1481da177e4SLinus Torvalds {
1499be259aaSAl Viro 	return (__force __sum16)~do_csum(iph,ihl*4);
1501da177e4SLinus Torvalds }
1511da177e4SLinus Torvalds 
1521da177e4SLinus Torvalds /*
1531da177e4SLinus Torvalds  * computes the checksum of a memory block at buff, length len,
1541da177e4SLinus Torvalds  * and adds in "sum" (32-bit)
1551da177e4SLinus Torvalds  *
1561da177e4SLinus Torvalds  * returns a 32-bit number suitable for feeding into itself
1571da177e4SLinus Torvalds  * or csum_tcpudp_magic
1581da177e4SLinus Torvalds  *
1591da177e4SLinus Torvalds  * this function must be called with even lengths, except
1601da177e4SLinus Torvalds  * for the last fragment, which may be odd
1611da177e4SLinus Torvalds  *
1621da177e4SLinus Torvalds  * it's best to have buff aligned on a 32-bit boundary
1631da177e4SLinus Torvalds  */
1649be259aaSAl Viro __wsum csum_partial(const void *buff, int len, __wsum sum)
1651da177e4SLinus Torvalds {
1661da177e4SLinus Torvalds 	unsigned long result = do_csum(buff, len);
1671da177e4SLinus Torvalds 
1681da177e4SLinus Torvalds 	/* add in old sum, and carry.. */
1699be259aaSAl Viro 	result += (__force u32)sum;
1701da177e4SLinus Torvalds 	/* 32+c bits -> 32 bits */
1711da177e4SLinus Torvalds 	result = (result & 0xffffffff) + (result >> 32);
1729be259aaSAl Viro 	return (__force __wsum)result;
1731da177e4SLinus Torvalds }
1741da177e4SLinus Torvalds 
1751da177e4SLinus Torvalds EXPORT_SYMBOL(csum_partial);
1761da177e4SLinus Torvalds 
1771da177e4SLinus Torvalds /*
1781da177e4SLinus Torvalds  * this routine is used for miscellaneous IP-like checksums, mainly
1791da177e4SLinus Torvalds  * in icmp.c
1801da177e4SLinus Torvalds  */
1819be259aaSAl Viro __sum16 ip_compute_csum(const void *buff, int len)
1821da177e4SLinus Torvalds {
1839be259aaSAl Viro 	return (__force __sum16)~from64to16(do_csum(buff,len));
1841da177e4SLinus Torvalds }
185