/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>
/*
 * Computes the checksum of a memory block at src, length len,
 * while copying the block to dst.
 * If an access exception occurs on src or dst, it returns zero
 * (the sum is accumulated so that a successful copy never yields
 * zero), and the contents of dst are then unreliable.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len);

#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len)   \
	csum_partial_copy_generic((src), (dst), (len))
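
/*
 * Illustrative use of the copy-and-checksum helpers; a sketch, not code
 * from this file ("ubuf", "kbuf" and "len" are hypothetical). By
 * convention a zero return signals an access fault:
 *
 *	__wsum csum = csum_and_copy_from_user(ubuf, kbuf, len);
 *	if (!csum)
 *		return -EFAULT;
 *	... csum now covers the len bytes copied into kbuf ...
 */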

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	u32 tmp = (__force u32)sum;

	/*
	 * swap the two 16-bit halves of sum
	 * if there is a carry from adding the two 16-bit halves,
	 * it will carry from the lower half into the upper half,
	 * giving us the correct sum in the upper half.
	 */
	return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
}
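
/*
 * Example (illustrative, "buf" and "len" are placeholders): producing
 * the final, complemented 16-bit checksum from a running 32-bit sum:
 *
 *	__wsum partial = csum_partial(buf, len, 0);
 *	__sum16 check = csum_fold(partial);	... ready to store ...
 */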

/* Fold a 64-bit accumulator to 32 bits: add the halves, end-around carry. */
static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum) from64to32(s);
#else
	/* 32-bit: accumulate with carry, then fold the final carry back in */
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
	return sum;
#endif
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
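
/*
 * Example (illustrative, "saddr", "daddr", "udph" and "ulen" are
 * hypothetical): checksumming a UDP packet by folding the payload sum
 * together with the pseudo-header:
 *
 *	udph->check = 0;
 *	__wsum payload = csum_partial(udph, ulen, 0);
 *	udph->check = csum_tcpudp_magic(saddr, daddr, ulen,
 *					IPPROTO_UDP, payload);
 *	if (!udph->check)
 *		udph->check = CSUM_MANGLED_0;	... 0 means "no checksum" ...
 */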

#define HAVE_ARCH_CSUM_ADD
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;

	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
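
/*
 * Example (illustrative, "a" and "b" are hypothetical buffers):
 * csum_add() combines partial sums of adjacent fragments, provided the
 * first fragment has even length:
 *
 *	__wsum s1 = csum_partial(a, alen, 0);	... alen even ...
 *	__wsum s2 = csum_partial(b, blen, 0);
 *	__wsum whole = csum_add(s1, s2);	... == sum of a then b ...
 */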

#define HAVE_ARCH_CSUM_SHIFT
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate sum to align it with a 16b boundary */
	return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
}
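
/*
 * Example (illustrative, "frag", "off" and "flen" are hypothetical):
 * folding in a fragment that starts at an odd offset; the byte rotate
 * compensates for the misalignment before the fragment is added:
 *
 *	__wsum part = csum_partial(frag, flen, 0);
 *	sum = csum_add(sum, csum_shift(part, off));
 */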

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

	/* add the first two words with carry, then loop over the rest */
	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
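
/*
 * Example (illustrative, "iph" is a hypothetical struct iphdr pointer):
 * validating a received IPv4 header; over a header whose checksum field
 * is correct, the fold comes out zero:
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto drop;	... non-zero result: corrupt header ...
 */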

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}
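
/*
 * Example (illustrative, "hdr" is a hypothetical 8-byte structure): with
 * a compile-time-constant length the call above reduces to a couple of
 * csum_add()s instead of a call into __csum_partial():
 *
 *	__wsum s = csum_partial(hdr, 8, 0);	... two 32-bit csum_adds ...
 */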

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
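
/*
 * Example (illustrative, "icmph" and "len" are hypothetical): ICMP-style
 * checksumming, with the checksum field zeroed before summing:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */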

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */