xref: /openbmc/linux/arch/mips/include/asm/checksum.h (revision c819e2cf)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
7  * Copyright (C) 1999 Silicon Graphics, Inc.
8  * Copyright (C) 2001 Thiemo Seufer.
9  * Copyright (C) 2002 Maciej W. Rozycki
10  * Copyright (C) 2014 Imagination Technologies Ltd.
11  */
12 #ifndef _ASM_CHECKSUM_H
13 #define _ASM_CHECKSUM_H
14 
15 #include <linux/in6.h>
16 
17 #include <asm/uaccess.h>
18 
19 /*
20  * computes the checksum of a memory block at buff, length len,
21  * and adds in "sum" (32-bit)
22  *
23  * returns a 32-bit number suitable for feeding into itself
24  * or csum_tcpudp_magic
25  *
26  * this function must be called with even lengths, except
27  * for the last fragment, which may be odd
28  *
29  * it's best to have buff aligned on a 32-bit boundary
30  */
31 __wsum csum_partial(const void *buff, int len, __wsum sum);
32 
33 __wsum __csum_partial_copy_kernel(const void *src, void *dst,
34 				  int len, __wsum sum, int *err_ptr);
35 
36 __wsum __csum_partial_copy_from_user(const void *src, void *dst,
37 				     int len, __wsum sum, int *err_ptr);
38 __wsum __csum_partial_copy_to_user(const void *src, void *dst,
39 				   int len, __wsum sum, int *err_ptr);
40 /*
41  * this is a new version of the above that records errors it finds in *errp,
42  * but continues and zeros the rest of the buffer.
43  */
44 static inline
45 __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
46 				   __wsum sum, int *err_ptr)
47 {
48 	might_fault();
49 	if (segment_eq(get_fs(), get_ds()))
50 		return __csum_partial_copy_kernel((__force void *)src, dst,
51 						  len, sum, err_ptr);
52 	else
53 		return __csum_partial_copy_from_user((__force void *)src, dst,
54 						     len, sum, err_ptr);
55 }
56 
57 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
58 static inline
59 __wsum csum_and_copy_from_user(const void __user *src, void *dst,
60 			       int len, __wsum sum, int *err_ptr)
61 {
62 	if (access_ok(VERIFY_READ, src, len))
63 		return csum_partial_copy_from_user(src, dst, len, sum,
64 						   err_ptr);
65 	if (len)
66 		*err_ptr = -EFAULT;
67 
68 	return sum;
69 }
70 
71 /*
72  * Copy and checksum to user
73  */
74 #define HAVE_CSUM_COPY_USER
75 static inline
76 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
77 			     __wsum sum, int *err_ptr)
78 {
79 	might_fault();
80 	if (access_ok(VERIFY_WRITE, dst, len)) {
81 		if (segment_eq(get_fs(), get_ds()))
82 			return __csum_partial_copy_kernel(src,
83 							  (__force void *)dst,
84 							  len, sum, err_ptr);
85 		else
86 			return __csum_partial_copy_to_user(src,
87 							   (__force void *)dst,
88 							   len, sum, err_ptr);
89 	}
90 	if (len)
91 		*err_ptr = -EFAULT;
92 
93 	return (__force __wsum)-1; /* invalid checksum */
94 }
95 
96 /*
97  * the same as csum_partial, but copies from user space (but on MIPS
98  * we have just one address space, so this is identical to the above)
99  */
100 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
101 				       int len, __wsum sum);
102 
/*
 * Fold a 32-bit partial checksum down to 16 bits without adding any
 * pseudo header: the two halfwords are summed in ones-complement
 * arithmetic and the result is complemented.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
	"	.set	push		# csum_fold\n"
	"	.set	noat		\n"	/* $1 ($at) is used as scratch */
	"	sll	$1, %0, 16	\n"	/* $1  = sum << 16             */
	"	addu	%0, $1		\n"	/* add low half into high half */
	"	sltu	$1, %0, $1	\n"	/* $1  = carry out of that add */
	"	srl	%0, %0, 16	\n"	/* keep the upper halfword     */
	"	addu	%0, $1		\n"	/* fold the carry back in      */
	"	xori	%0, 0xffff	\n"	/* ones-complement the result  */
	"	.set	pop"
	: "=r" (sum)
	: "0" (sum));

	return (__force __sum16)sum;
}
123 
124 /*
125  *	This is a version of ip_compute_csum() optimized for IP headers,
126  *	which always checksum on 4 octet boundaries.
127  *
128  *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
129  *	Arnt Gulbrandsen.
130  */
131 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
132 {
133 	const unsigned int *word = iph;
134 	const unsigned int *stop = word + ihl;
135 	unsigned int csum;
136 	int carry;
137 
138 	csum = word[0];
139 	csum += word[1];
140 	carry = (csum < word[1]);
141 	csum += carry;
142 
143 	csum += word[2];
144 	carry = (csum < word[2]);
145 	csum += carry;
146 
147 	csum += word[3];
148 	carry = (csum < word[3]);
149 	csum += carry;
150 
151 	word += 4;
152 	do {
153 		csum += *word;
154 		carry = (csum < *word);
155 		csum += carry;
156 		word++;
157 	} while (word != stop);
158 
159 	return csum_fold(csum);
160 }
161 
/*
 * Accumulate the TCP/UDP pseudo-header (source address, destination
 * address, protocol and length) into 'sum' without folding.  The
 * 32-bit result is suitable for csum_fold() / csum_tcpudp_magic().
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr,
	__be32 daddr, unsigned short len, unsigned short proto,
	__wsum sum)
{
	__asm__(
	"	.set	push		# csum_tcpudp_nofold\n"
	"	.set	noat		\n"
#ifdef CONFIG_32BIT
	/* 32-bit: three add-with-carry steps; $1 ($at) holds each carry. */
	"	addu	%0, %2		\n"
	"	sltu	$1, %0, %2	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %3		\n"
	"	sltu	$1, %0, %3	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %4		\n"
	"	sltu	$1, %0, %4	\n"
	"	addu	%0, $1		\n"
#endif
#ifdef CONFIG_64BIT
	/*
	 * 64-bit: accumulate everything in one 64-bit register, then
	 * fold the upper word into the lower one (dsll32/daddu/dsra32).
	 */
	"	daddu	%0, %2		\n"
	"	daddu	%0, %3		\n"
	"	daddu	%0, %4		\n"
	"	dsll32	$1, %0, 0	\n"
	"	daddu	%0, $1		\n"
	"	dsra32	%0, %0, 0	\n"
#endif
	"	.set	pop"
	: "=r" (sum)
	: "0" ((__force unsigned long)daddr),
	  "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
	/*
	 * Little endian: shift proto+len into the upper byte lanes so
	 * the 16-bit field lines up with network byte order in memory.
	 */
	  "r" ((proto + len) << 8),
#else
	  "r" (proto + len),
#endif
	  "r" ((__force unsigned long)sum));

	return sum;
}
203 
204 /*
205  * computes the checksum of the TCP/UDP pseudo-header
206  * returns a 16-bit checksum, already complemented
207  */
208 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
209 						   unsigned short len,
210 						   unsigned short proto,
211 						   __wsum sum)
212 {
213 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
214 }
215 
216 /*
217  * this routine is used for miscellaneous IP-like checksums, mainly
218  * in icmp.c
219  */
220 static inline __sum16 ip_compute_csum(const void *buff, int len)
221 {
222 	return csum_fold(csum_partial(buff, len, 0));
223 }
224 
#define _HAVE_ARCH_IPV6_CSUM
/*
 * Checksum of the IPv6 pseudo-header: both 128-bit addresses, the
 * payload length and the next-header (protocol) value, accumulated on
 * top of 'sum' and folded to a complemented 16-bit result.
 *
 * The asm runs with .set noreorder and interleaves each address-word
 * load with folding the previous carry ($1/$at) back into the running
 * sum.  'proto' is declared as an output ("=r") because the register
 * holding htonl(proto) is reused as the load scratch register (%1).
 */
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6	\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 0(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1	\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	addu	%0, $1		# Add final carry\n"
	"	.set	pop"
	: "=r" (sum), "=r" (proto)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));

	return csum_fold(sum);
}
289 
290 #endif /* _ASM_CHECKSUM_H */
291