/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H

/*  checksum.h:  IP/UDP/TCP checksum routines on the Sparc.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1996 Eddie C. Dost
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Alpha checksum c-code
 *      ix86 inline assembly
 *      RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <linux/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
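
/* Illustrative use only: checksum a payload that arrives in two fragments,
 * feeding the running sum back in, then finish with the pseudo-header
 * (frag1/frag2/len1/len2 are hypothetical):
 *
 *	__wsum s = csum_partial(frag1, len1, 0);
 *	s = csum_partial(frag2, len2, s);
 *	check = csum_tcpudp_magic(saddr, daddr, len1 + len2, IPPROTO_TCP, s);
 */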

/* the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or even better a 64-bit) boundary
 */

unsigned int __csum_partial_copy_sparc_generic(const unsigned char *, unsigned char *);

static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	/* The asm helper takes its arguments in fixed registers: src in
	 * %o0, dst in %o1, len in %g1, and the running sum in %g7; the
	 * result comes back in %o0.
	 */
	register unsigned int ret asm("o0") = (unsigned int)src;
	register char *d asm("o1") = dst;
	register int l asm("g1") = len;

	__asm__ __volatile__ (
		"call __csum_partial_copy_sparc_generic\n\t"
		" mov -1, %%g7\n"	/* seed the sum with ~0U in the delay slot */
	: "=&r" (ret), "=&r" (d), "=&r" (l)
	: "0" (ret), "1" (d), "2" (l)
	: "o2", "o3", "o4", "o5", "o7",
	  "g2", "g3", "g4", "g5", "g7",
	  "memory", "cc");
	return (__force __wsum)ret;
}

static inline __wsum
csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (unlikely(!access_ok(src, len)))
		return 0;
	return csum_partial_copy_nocheck((__force void *)src, dst, len);
}

static inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	if (!access_ok(dst, len))
		return 0;
	return csum_partial_copy_nocheck(src, (__force void *)dst, len);
}
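
/* A zero return from either wrapper above means access_ok() failed; a
 * successful copy always yields a nonzero sum because it is seeded with
 * ~0U. Illustrative caller pattern (uptr/kbuf are hypothetical):
 *
 *	__wsum s = csum_and_copy_from_user(uptr, kbuf, len);
 *	if (!s)
 *		return -EFAULT;
 */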

/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
 * the majority of the time.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__sum16 sum;

	/* Note: We must read %2 before we touch %0 for the first time,
	 *       because GCC can legitimately use the same register for
	 *       both operands.
	 */
	__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
			     "ld\t[%1 + 0x00], %0\n\t"
			     "ld\t[%1 + 0x04], %%g2\n\t"
			     "ld\t[%1 + 0x08], %%g3\n\t"
			     "addcc\t%%g2, %0, %0\n\t"
			     "addxcc\t%%g3, %0, %0\n\t"
			     "ld\t[%1 + 0x0c], %%g2\n\t"
			     "ld\t[%1 + 0x10], %%g3\n\t"
			     "addxcc\t%%g2, %0, %0\n\t"
			     "addx\t%0, %%g0, %0\n"
			     "1:\taddcc\t%%g3, %0, %0\n\t"
			     "add\t%1, 4, %1\n\t"
			     "addxcc\t%0, %%g0, %0\n\t"
			     "subcc\t%%g4, 1, %%g4\n\t"
			     "be,a\t2f\n\t"
			     "sll\t%0, 16, %%g2\n\t"
			     "b\t1b\n\t"
			     "ld\t[%1 + 0x10], %%g3\n"
			     "2:\taddcc\t%0, %%g2, %%g2\n\t"
			     "srl\t%%g2, 16, %0\n\t"
			     "addx\t%0, %%g0, %0\n\t"
			     "xnor\t%%g0, %0, %0"
			     : "=r" (sum), "=&r" (iph)
			     : "r" (ihl), "1" (iph)
			     : "g2", "g3", "g4", "cc", "memory");
	return sum;
}
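
/* Illustrative only: the usual pattern for (re)computing an IPv4 header
 * checksum with this helper (iph here is a hypothetical struct iphdr *):
 *
 *	iph->check = 0;
 *	iph->check = ip_fast_csum(iph, iph->ihl);
 */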

/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
			     "srl\t%1, 16, %1\n\t"
			     "addx\t%1, %%g0, %1\n\t"
			     "xnor\t%%g0, %1, %0"
			     : "=&r" (sum), "=r" (tmp)
			     : "0" (sum), "1" ((__force u32)sum<<16)
			     : "cc");
	return (__force __sum16)sum;
}
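
/* Illustrative only: a portable C equivalent of the fold above (the name
 * csum_fold_ref is hypothetical, not part of the kernel API).
 */
static inline __sum16 csum_fold_ref(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any carry-out */
	return (__force __sum16)~sum;		/* one's complement */
}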

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
			     "addxcc\t%2, %0, %0\n\t"
			     "addxcc\t%3, %0, %0\n\t"
			     "addx\t%0, %%g0, %0\n\t"
			     : "=r" (sum), "=r" (saddr)
			     : "r" (daddr), "r" (proto + len), "0" (sum),
			       "1" (saddr)
			     : "cc");
	return sum;
}
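
/* Why "proto + len" works (illustrative): the pseudo-header's last 32-bit
 * word is zero:proto:len, i.e. (proto << 16) + len on big-endian sparc.
 * Since 2^16 == 1 (mod 65535), (proto << 16) and proto contribute the same
 * amount to the folded 16-bit one's-complement sum, so the cheaper
 * "proto + len" addition is equivalent after csum_fold().
 */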

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
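
/* Illustrative only: verifying a received TCP segment. A valid packet
 * checksums to zero over the segment plus pseudo-header:
 *
 *	__wsum s = csum_partial(tcph, tcp_len, 0);
 *	if (csum_tcpudp_magic(saddr, daddr, tcp_len, IPPROTO_TCP, s))
 *		goto csum_error;
 */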

#define _HAVE_ARCH_IPV6_CSUM

static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	__asm__ __volatile__ (
		"addcc	%3, %4, %%g4\n\t"
		"addxcc	%5, %%g4, %%g4\n\t"
		"ld	[%2 + 0x0c], %%g2\n\t"
		"ld	[%2 + 0x08], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%2 + 0x04], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%2 + 0x00], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%1 + 0x0c], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%1 + 0x08], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"ld	[%1 + 0x04], %%g2\n\t"
		"addxcc	%%g3, %%g4, %%g4\n\t"
		"ld	[%1 + 0x00], %%g3\n\t"
		"addxcc	%%g2, %%g4, %%g4\n\t"
		"addxcc	%%g3, %%g4, %0\n\t"
		"addx	0, %0, %0\n"
		: "=&r" (sum)
		: "r" (saddr), "r" (daddr),
		  "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
		: "g2", "g3", "g4", "cc");

	return csum_fold(sum);
}

/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
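
/* Illustrative only: the usual ICMP pattern, filling in the checksum of a
 * fully built message (icmph is a hypothetical struct icmphdr *):
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */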

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	__asm__ __volatile__(
		"addcc   %0, %1, %0\n"
		"addx    %0, %%g0, %0"	/* fold the carry back in */
		: "=r" (csum)
		: "r" (addend), "0" (csum)
		: "cc");

	return csum;
}
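
/* Illustrative only: a portable C equivalent of the addcc/addx pair above,
 * with the end-around carry supplied by the "res < addend" test:
 *
 *	u32 res = (__force u32)csum + (__force u32)addend;
 *	return (__force __wsum)(res + (res < (__force u32)addend));
 */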

#endif /* !(__SPARC_CHECKSUM_H) */