#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
					      int len, __wsum sum,
					      int *src_err, int *dst_err);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);

#define csum_partial_copy_nocheck(src, dst, len, sum)   \
        csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
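
/*
 * Illustrative (non-authoritative) usage sketch for the copy-and-checksum
 * helpers above; 'usrc', 'kdst' and 'len' are hypothetical caller
 * variables and are assumed to have been validated already:
 *
 *	int err = 0;
 *	__wsum csum = csum_and_copy_from_user(usrc, kdst, len, 0, &err);
 *	if (err)
 *		return -EFAULT;
 */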

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/*
	 * If there is a carry from adding the two 16-bit halves,
	 * it will carry from the lower half into the upper half,
	 * giving us the correct sum in the upper half.
	 */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
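
/*
 * Worked example of the fold above (illustrative only): for
 * sum = 0x12345678, tmp = 0x56781234 and ~(sum + tmp) >> 16 = 0x9753,
 * which matches folding the halves directly:
 * ~(0x1234 + 0x5678) & 0xffff = ~0x68ac & 0xffff = 0x9753.
 */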

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
#ifdef __powerpc64__
	unsigned long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
	s += proto + len;
	s += (s >> 32);
	return (__force __wsum) s;
#else
	__asm__("addc %0,%0,%1;"
		"adde %0,%0,%2;"
		"adde %0,%0,%3;"
		"addze %0,%0;"
		: "=r" (sum)
		: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
#endif
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
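
/*
 * Illustrative (non-authoritative) use: checksum the transport segment
 * with csum_partial() and then mix in the pseudo-header, e.g.
 *
 *	__wsum csum = csum_partial(th, tcp_len, 0);
 *	__sum16 check = csum_tcpudp_magic(saddr, daddr, tcp_len,
 *					  IPPROTO_TCP, csum);
 *
 * where th, tcp_len, saddr and daddr are hypothetical caller variables.
 */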

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
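
/*
 * Example of the end-around carry in csum_add() (illustrative):
 * 0xffff0000 + 0x00020000 overflows to 0x00010000 with a carry of 1,
 * and adding the carry back in gives 0x00010001.
 */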

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * whose data is always a whole number of 32-bit words and aligned on
 * a 4-octet boundary.  ihl is the header length in 32-bit words and
 * is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	s += (s >> 32);
	return (__force __wsum)s;
#else
	__wsum sum, tmp;

	/*
	 * Add the first two words, loop over the remaining ihl - 2 words
	 * accumulating with carry, then fold the final carry back in.
	 */
	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
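
/*
 * Illustrative (non-authoritative) sketch of header validation on
 * receive; 'iph' is a hypothetical struct iphdr pointer:
 *
 *	if (ip_fast_csum((const void *)iph, iph->ihl) != 0)
 *		goto drop;
 *
 * A correct IPv4 header (checksum field included) sums to all ones,
 * so csum_fold() yields zero.
 */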

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}
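
/*
 * For small constant lengths the compiler can resolve the checks in
 * csum_partial() at build time, e.g. (illustrative) csum_partial(hdr, 8, 0)
 * reduces to two csum_add() calls; constant multiple-of-4 lengths use
 * ip_fast_csum_nofold(), and everything else falls back to the
 * out-of-line __csum_partial().
 */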

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
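
/*
 * Illustrative (non-authoritative) use for an ICMP message held in a
 * linear buffer; 'icmph' and 'len' are hypothetical caller variables:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */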

#endif /* CONFIG_GENERIC_CSUM */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */