/* /openbmc/linux/arch/mips/include/asm/checksum.h (revision 12eb4683) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2001 Thiemo Seufer.
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H

#include <linux/in6.h>

#include <asm/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
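
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * running 32-bit sum can be fed back into csum_partial() for further
 * fragments and is only folded down to 16 bits at the very end, e.g.
 *
 *	__wsum sum = csum_partial(hdr, hdr_len, 0);
 *	sum = csum_partial(payload, payload_len, sum);
 *	__sum16 check = csum_fold(sum);
 *
 * hdr, hdr_len, payload and payload_len are hypothetical local buffers.
 */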

__wsum __csum_partial_copy_user(const void *src, void *dst,
				int len, __wsum sum, int *err_ptr);

/*
 * this is a new version of the above that records errors it finds in
 * *err_ptr, but continues and zeros the rest of the buffer.
 */
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
				   __wsum sum, int *err_ptr)
{
	might_fault();
	return __csum_partial_copy_user((__force void *)src, dst,
					len, sum, err_ptr);
}
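
/*
 * Illustrative call pattern (a sketch, not part of the original header):
 * callers clear the error slot beforehand and test it after the copy,
 * treating a non-zero value as a fault, e.g.
 *
 *	int err = 0;
 *	sum = csum_partial_copy_from_user(ubuf, kbuf, len, sum, &err);
 *	if (err)
 *		return -EFAULT;
 *
 * ubuf, kbuf, len and sum are hypothetical locals.
 */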

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
			     __wsum sum, int *err_ptr)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, dst, len))
		return __csum_partial_copy_user(src, (__force void *)dst,
						len, sum, err_ptr);
	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}

/*
 * the same as csum_partial, but also copies from src to dst (on MIPS we
 * have just one address space, so this is identical to the user-copy
 * variants above)
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				       int len, __wsum sum);

/*
 *	Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
	"	.set	push		# csum_fold\n"
	"	.set	noat		\n"
	"	sll	$1, %0, 16	\n"
	"	addu	%0, $1		\n"
	"	sltu	$1, %0, $1	\n"
	"	srl	%0, %0, 16	\n"
	"	addu	%0, $1		\n"
	"	xori	%0, 0xffff	\n"
	"	.set	pop"
	: "=r" (sum)
	: "0" (sum));

	return (__force __sum16)sum;
}
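
/*
 * Rough C equivalent of the fold above (a documentation sketch, not part
 * of the original header): add the high half-word into the low one, fold
 * the possible carry back in, then complement the result, e.g.
 *
 *	u32 tmp = (__force u32)sum;
 *	tmp = (tmp & 0xffff) + (tmp >> 16);
 *	tmp = (tmp & 0xffff) + (tmp >> 16);
 *	return (__force __sum16)~tmp;
 */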

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 *
 *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *	Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	const unsigned int *word = iph;
	const unsigned int *stop = word + ihl;
	unsigned int csum;
	int carry;

	csum = word[0];
	csum += word[1];
	carry = (csum < word[1]);
	csum += carry;

	csum += word[2];
	carry = (csum < word[2]);
	csum += carry;

	csum += word[3];
	carry = (csum < word[3]);
	csum += carry;

	word += 4;
	do {
		csum += *word;
		carry = (csum < *word);
		csum += carry;
		word++;
	} while (word != stop);

	return csum_fold(csum);
}
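
/*
 * Note (added for documentation): ihl is the IPv4 header length in 32-bit
 * words, so it is at least 5 for a valid header; the first four words are
 * summed unrolled above, and the do/while loop picks up the fifth word
 * plus any IP options, folding each carry back into the sum.
 */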

static inline __wsum csum_tcpudp_nofold(__be32 saddr,
	__be32 daddr, unsigned short len, unsigned short proto,
	__wsum sum)
{
	__asm__(
	"	.set	push		# csum_tcpudp_nofold\n"
	"	.set	noat		\n"
#ifdef CONFIG_32BIT
	"	addu	%0, %2		\n"
	"	sltu	$1, %0, %2	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %3		\n"
	"	sltu	$1, %0, %3	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %4		\n"
	"	sltu	$1, %0, %4	\n"
	"	addu	%0, $1		\n"
#endif
#ifdef CONFIG_64BIT
	"	daddu	%0, %2		\n"
	"	daddu	%0, %3		\n"
	"	daddu	%0, %4		\n"
	"	dsll32	$1, %0, 0	\n"
	"	daddu	%0, $1		\n"
	"	dsra32	%0, %0, 0	\n"
#endif
	"	.set	pop"
	: "=r" (sum)
	: "0" ((__force unsigned long)daddr),
	  "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
	  "r" ((proto + len) << 8),
#else
	  "r" (proto + len),
#endif
	  "r" ((__force unsigned long)sum));

	return sum;
}
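
/*
 * Note (added for documentation): the routine above accumulates the
 * TCP/UDP pseudo-header (source address, destination address, protocol
 * and length) into the running sum with end-around carry.  On a
 * little-endian CPU the 16-bit proto and len fields have to enter the
 * sum byte-swapped; shifting (proto + len) left by 8 is congruent to
 * that byte swap modulo 0xffff, which is all the final fold cares about.
 */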

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
						   unsigned short len,
						   unsigned short proto,
						   __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
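
/*
 * Typical use (an illustrative sketch, not part of the original header):
 * checksum the TCP segment with its check field zeroed, then add the
 * pseudo-header and fold, e.g.
 *
 *	th->check = 0;
 *	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr, len,
 *				      IPPROTO_TCP,
 *				      csum_partial(th, len, 0));
 *
 * th, iph and len stand in for a tcphdr, an iphdr and the segment length.
 */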

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6	\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 0(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	addu	%0, $1		# Add final carry\n"
	"	.set	pop"
	: "=r" (sum), "=r" (proto)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));

	return csum_fold(sum);
}
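
/*
 * Note (added for documentation): the register holding sum starts out as
 * htonl(len); the assembly then adds htonl(proto), the caller's partial
 * sum and the eight 32-bit words of the two IPv6 addresses, propagating
 * the carry of every addition, before csum_fold() compresses the result
 * into the final 16-bit pseudo-header checksum.
 */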

#endif /* _ASM_CHECKSUM_H */