xref: /openbmc/linux/arch/s390/include/asm/checksum.h (revision 31b90347)
1 /*
2  *    S390 fast network checksum routines
3  *
4  *  S390 version
5  *    Copyright IBM Corp. 1999
6  *    Author(s): Ulrich Hild        (first version)
7  *               Martin Schwidefsky (heavily optimized CKSM version)
8  *               D.J. Barrow        (third attempt)
9  */
10 
11 #ifndef _S390_CHECKSUM_H
12 #define _S390_CHECKSUM_H
13 
14 #include <asm/uaccess.h>
15 
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
static inline __wsum
csum_partial(const void *buff, int len, __wsum sum)
{
	/*
	 * The CKSM instruction takes its second operand in an even/odd
	 * register pair: even register = buffer address, odd register =
	 * remaining length.  Hence the hard register allocation to 2/3.
	 */
	register unsigned long reg2 asm("2") = (unsigned long) buff;
	register unsigned long reg3 asm("3") = (unsigned long) len;

	asm volatile(
		"0:	cksm	%0,%1\n"	/* do checksum on longs */
		"	jo	0b\n"	/* CC 3 = partially complete, resume */
		: "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
	return sum;
}
40 
41 /*
42  * the same as csum_partial_copy, but copies from user space.
43  *
44  * here even more important to align src and dst on a 32-bit (or even
45  * better 64-bit) boundary
46  *
47  * Copy from userspace and compute checksum.  If we catch an exception
48  * then zero the rest of the buffer.
49  */
50 static inline __wsum
51 csum_partial_copy_from_user(const void __user *src, void *dst,
52                                           int len, __wsum sum,
53                                           int *err_ptr)
54 {
55 	int missing;
56 
57 	missing = copy_from_user(dst, src, len);
58 	if (missing) {
59 		memset(dst + len - missing, 0, missing);
60 		*err_ptr = -EFAULT;
61 	}
62 
63 	return csum_partial(dst, len, sum);
64 }
65 
66 
67 static inline __wsum
68 csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
69 {
70         memcpy(dst,src,len);
71 	return csum_partial(dst, len, sum);
72 }
73 
74 /*
75  *      Fold a partial checksum without adding pseudo headers
76  */
77 static inline __sum16 csum_fold(__wsum sum)
78 {
79 	u32 csum = (__force u32) sum;
80 
81 	csum += (csum >> 16) + (csum << 16);
82 	csum >>= 16;
83 	return (__force __sum16) ~csum;
84 }
85 
86 /*
87  *	This is a version of ip_compute_csum() optimized for IP headers,
88  *	which always checksum on 4 octet boundaries.
89  *
90  */
91 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
92 {
93 	return csum_fold(csum_partial(iph, ihl*4, 0));
94 }
95 
96 /*
97  * computes the checksum of the TCP/UDP pseudo-header
98  * returns a 32-bit checksum
99  */
100 static inline __wsum
101 csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
102                    unsigned short len, unsigned short proto,
103                    __wsum sum)
104 {
105 	__u32 csum = (__force __u32)sum;
106 
107 	csum += (__force __u32)saddr;
108 	if (csum < (__force __u32)saddr)
109 		csum++;
110 
111 	csum += (__force __u32)daddr;
112 	if (csum < (__force __u32)daddr)
113 		csum++;
114 
115 	csum += len + proto;
116 	if (csum < len + proto)
117 		csum++;
118 
119 	return (__force __wsum)csum;
120 }
121 
122 /*
123  * computes the checksum of the TCP/UDP pseudo-header
124  * returns a 16-bit checksum, already complemented
125  */
126 
127 static inline __sum16
128 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
129                   unsigned short len, unsigned short proto,
130                   __wsum sum)
131 {
132 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
133 }
134 
135 /*
136  * this routine is used for miscellaneous IP-like checksums, mainly
137  * in icmp.c
138  */
139 
140 static inline __sum16 ip_compute_csum(const void *buff, int len)
141 {
142 	return csum_fold(csum_partial(buff, len, 0));
143 }
144 
145 #endif /* _S390_CHECKSUM_H */
146 
147 
148