/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_CHECKSUM_H
#define _PARISC_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>	/* for copy_to_user() below */

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *, int, __wsum);

/*
 * The same as csum_partial, but copies from src while it checksums.
 *
 * Here it is even more important to align src and dst on a 32-bit
 * (or, better yet, 64-bit) boundary.
 */
extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);

/*
 * Like csum_partial_copy_nocheck(), but records any fault it hits in
 * *errp and then continues, zeroing the rest of the buffer.
 */
extern __wsum csum_partial_copy_from_user(const void __user *src,
		void *dst, int len, __wsum sum, int *errp);

/*
 * Optimized for IP headers, which always checksum on 4-octet boundaries.
 *
 * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
 * LaMont Jones <lamont@debian.org>
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;
	unsigned long t0, t1, t2;

	__asm__ __volatile__ (
"	ldws,ma		4(%1), %0\n"
"	addib,<=	-4, %2, 2f\n"
"\n"
"	ldws		4(%1), %4\n"
"	ldws		8(%1), %5\n"
"	add		%0, %4, %0\n"
"	ldws,ma		12(%1), %3\n"
"	addc		%0, %5, %0\n"
"	addc		%0, %3, %0\n"
"1:	ldws,ma		4(%1), %3\n"
"	addib,<		0, %2, 1b\n"
"	addc		%0, %3, %0\n"
"\n"
"	extru		%0, 31, 16, %4\n"
"	extru		%0, 15, 16, %5\n"
"	addc		%4, %5, %0\n"
"	extru		%0, 15, 16, %5\n"
"	add		%0, %5, %0\n"
"	subi		-1, %0, %0\n"
"2:\n"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (t0), "=r" (t1), "=r" (t2)
	: "1" (iph), "2" (ihl)
	: "memory");

	return (__force __sum16)sum;
}

/*
 *	Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	/* Add the two 16-bit halves of sum to its swapped halves; any
	   carry out of the low half propagates into the high half,
	   leaving the correct folded sum in the upper 16 bits. */
	sum += (sum << 16) + (sum >> 16);
	return (__force __sum16)(~sum >> 16);
}
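/*
 * Worked example of the fold above (illustrative only), for
 * csum = 0x0001ffff:
 *
 *	sum                       = 0x0001ffff
 *	(sum << 16) + (sum >> 16) = 0xffff0000 + 0x00000001
 *	sum after +=              = 0x00010000	(mod 2^32)
 *	~sum >> 16                = 0xfffe
 *
 * Folding by hand gives the same result: 0x0001 + 0xffff = 0x10000,
 * the end-around carry turns that into 0x0001, and its complement
 * is 0xfffe.
 */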
/*
 * Accumulate the IPv4 pseudo-header fields (saddr, daddr, proto+len)
 * into sum with end-around carry, without folding down to 16 bits.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__(
	"	add  %1, %0, %0\n"
	"	addc %2, %0, %0\n"
	"	addc %3, %0, %0\n"
	"	addc %%r0, %0, %0\n"
		: "=r" (sum)
		: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
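/*
 * A minimal portable-C sketch of the accumulation that the asm in
 * csum_tcpudp_nofold performs, for illustration only: widen to 64 bits,
 * add the pseudo-header fields, then fold the carries back in.  The
 * function name is hypothetical, not a kernel API.  The asm version
 * instead chains addc so the carry never has to leave the PSW carry
 * bit, which is cheaper than a 64-bit accumulator on PA 1.x.
 */
static inline __wsum __csum_tcpudp_nofold_ref(__be32 saddr, __be32 daddr,
					      __u32 len, __u8 proto,
					      __wsum sum)
{
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;		/* pseudo-header source address */
	s += (__force u32)daddr;		/* pseudo-header destination address */
	s += proto + len;			/* protocol + length, as in the asm */
	s = (s & 0xffffffff) + (s >> 32);	/* fold carries back in */
	s = (s & 0xffffffff) + (s >> 32);	/* at most one more carry */
	return (__force __wsum)s;
}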
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buf, int len)
{
	return csum_fold(csum_partial(buf, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	unsigned long t0, t1, t2, t3;

	len += proto;	/* add 16-bit proto + len */

	__asm__ __volatile__ (

#if BITS_PER_LONG > 32

	/*
	** We can execute two loads and two adds per cycle on PA 8000.
	** But add insns get serialized waiting for the carry bit.
	** Try to keep 4 registers with "live" values ahead of the ALU.
	*/

"	ldd,ma		8(%1), %4\n"	/* get 1st saddr word */
"	ldd,ma		8(%2), %5\n"	/* get 1st daddr word */
"	add		%4, %0, %0\n"
"	ldd,ma		8(%1), %6\n"	/* 2nd saddr */
"	ldd,ma		8(%2), %7\n"	/* 2nd daddr */
"	add,dc		%5, %0, %0\n"
"	add,dc		%6, %0, %0\n"
"	add,dc		%7, %0, %0\n"
"	add,dc		%3, %0, %0\n"	/* fold in proto+len | carry bit */
"	extrd,u		%0, 31, 32, %4\n"	/* copy upper half down */
"	depdi		0, 31, 32, %0\n"	/* clear upper half */
"	add,dc		%4, %0, %0\n"	/* fold into 32 bits, plus carry */
"	addc		0, %0, %0\n"	/* add final carry */

#else

	/*
	** For PA 1.x, the insn order doesn't matter as much.
	** The insn stream is still serialized on the carry bit: each
	** addc has to wait for the result of the previous operation
	** (e.g. r0 + x).
	*/

"	ldw,ma		4(%1), %4\n"	/* get 1st saddr word */
"	ldw,ma		4(%2), %5\n"	/* get 1st daddr word */
"	add		%4, %0, %0\n"
"	ldw,ma		4(%1), %6\n"	/* 2nd saddr */
"	addc		%5, %0, %0\n"
"	ldw,ma		4(%2), %7\n"	/* 2nd daddr */
"	addc		%6, %0, %0\n"
"	ldw,ma		4(%1), %4\n"	/* 3rd saddr */
"	addc		%7, %0, %0\n"
"	ldw,ma		4(%2), %5\n"	/* 3rd daddr */
"	addc		%4, %0, %0\n"
"	ldw,ma		4(%1), %6\n"	/* 4th saddr */
"	addc		%5, %0, %0\n"
"	ldw,ma		4(%2), %7\n"	/* 4th daddr */
"	addc		%6, %0, %0\n"
"	addc		%7, %0, %0\n"
"	addc		%3, %0, %0\n"	/* fold in proto+len */
"	addc		%%r0, %0, %0\n"	/* add final carry */

#endif
	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
	  "=r" (t0), "=r" (t1), "=r" (t2), "=r" (t3)
	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len)
	: "memory");
	return csum_fold(sum);
}

/*
 *	Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
					       void __user *dst,
					       int len, __wsum sum,
					       int *err_ptr)
{
	/* code stolen from include/asm-mips64 */
	sum = csum_partial(src, len, sum);

	if (copy_to_user(dst, src, len)) {
		*err_ptr = -EFAULT;
		return (__force __wsum)-1;
	}

	return sum;
}

#endif /* _PARISC_CHECKSUM_H */
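/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * would typically combine the primitives above like this to fill in an
 * IPv4/UDP checksum, where 'uh', 'payload', and the lengths are
 * hypothetical, and CSUM_MANGLED_0 comes from net/checksum.h:
 *
 *	__wsum csum = csum_partial(payload, payload_len, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *				      IPPROTO_UDP, csum);
 *	if (!uh->check)
 *		uh->check = CSUM_MANGLED_0;
 *
 * since an all-zero UDP checksum on the wire means "no checksum".
 */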