/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_CHECKSUM_H
#define _PARISC_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *, int, __wsum);

/*
 * The same as csum_partial, but copies from src while it checksums.
 *
 * Here it is even more important to align src and dst on a 32-bit
 * (or, even better, a 64-bit) boundary.
 */
extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);

/*
 * This is a version of the above that records errors it finds in *errp,
 * but continues and zeros the rest of the buffer.
 */
extern __wsum csum_partial_copy_from_user(const void __user *src,
					  void *dst, int len, __wsum sum,
					  int *errp);

/*
 * Optimized for IP headers, which always checksum on 4 octet boundaries.
 *
 * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
 * LaMont Jones <lamont@debian.org>
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	__asm__ __volatile__ (
"	ldws,ma		4(%1), %0\n"	/* load 1st word, advance iph */
"	addib,<=	-4, %2, 2f\n"	/* ihl -= 4; bail if ihl was <= 4 */
"\n"
"	ldws		4(%1), %%r20\n"	/* words 2-4, loads interleaved */
"	ldws		8(%1), %%r21\n"	/* with the dependent adds */
"	add		%0, %%r20, %0\n"
"	ldws,ma		12(%1), %%r19\n"
"	addc		%0, %%r21, %0\n"
"	addc		%0, %%r19, %0\n"
"1:	ldws,ma		4(%1), %%r19\n"	/* remaining header words */
"	addib,<		0, %2, 1b\n"
"	addc		%0, %%r19, %0\n"
"\n"
"	extru		%0, 31, 16, %%r20\n"	/* fold the 32-bit sum... */
"	extru		%0, 15, 16, %%r21\n"
"	addc		%%r20, %%r21, %0\n"
"	extru		%0, 15, 16, %%r21\n"
"	add		%0, %%r21, %0\n"	/* ...down to 16 bits */
"	subi		-1, %0, %0\n"	/* complement: -1 - x == ~x */
"2:\n"
	: "=r" (sum), "=r" (iph), "=r" (ihl)
	: "1" (iph), "2" (ihl)
	: "r19", "r20", "r21", "memory");

	return (__force __sum16)sum;
}
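/*
 * Illustrative sketch, not part of the original header and kept out of
 * the build: a portable C rendering of what the assembly above computes,
 * i.e. the RFC 1071 ones' complement sum of ihl 32-bit words, folded to
 * 16 bits and inverted.  The name ip_fast_csum_ref is hypothetical.
 */
#if 0
static inline __sum16 ip_fast_csum_ref(const void *iph, unsigned int ihl)
{
	const u32 *word = iph;
	u64 sum = 0;

	while (ihl--)			/* ihl counts 32-bit words */
		sum += *word++;
	while (sum >> 16)		/* fold all carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;	/* complemented, truncated to 16 bits */
}
#endif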
/*
 * Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	/* Add the two 16-bit halves of sum, swapped.  A carry from
	   adding the halves propagates from the lower half into the
	   upper half, leaving the correctly folded sum in the upper
	   16 bits. */
	sum += (sum << 16) + (sum >> 16);
	return (__force __sum16)(~sum >> 16);
}

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__(
	"	add  %1, %0, %0\n"	/* sum += daddr */
	"	addc %2, %0, %0\n"	/* sum += saddr + carry */
	"	addc %3, %0, %0\n"	/* sum += (proto + len) + carry */
	"	addc %%r0, %0, %0\n"	/* fold in the final carry */
		: "=r" (sum)
		: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
}
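/*
 * Illustrative sketch, not part of the original header and kept out of
 * the build: the add/addc sequence above written in portable C -- a
 * 32-bit ones' complement accumulation of daddr, saddr and proto + len,
 * with each carry out of bit 31 added back in (the asm defers this to
 * the final "addc %r0").  Results can differ only in the two ones'
 * complement encodings of zero.  Helper names are hypothetical.
 */
#if 0
static inline u32 csum_add32_ref(u32 a, u32 b)
{
	u64 t = (u64)a + b;

	return (u32)t + (u32)(t >> 32);		/* end-around carry */
}

static inline __wsum csum_tcpudp_nofold_ref(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
	u32 s = (__force u32)sum;

	s = csum_add32_ref(s, (__force u32)daddr);
	s = csum_add32_ref(s, (__force u32)saddr);
	s = csum_add32_ref(s, proto + len);
	return (__force __wsum)s;
}
#endif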
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
static inline __sum16 ip_compute_csum(const void *buf, int len)
{
	return csum_fold(csum_partial(buf, len, 0));
}


#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	__asm__ __volatile__ (

#if BITS_PER_LONG > 32

	/*
	** We can execute two loads and two adds per cycle on PA 8000.
	** But add insn's get serialized waiting for the carry bit.
	** Try to keep 4 registers with "live" values ahead of the ALU.
	*/

"	ldd,ma		8(%1), %%r19\n"	/* get 1st saddr word */
"	ldd,ma		8(%2), %%r20\n"	/* get 1st daddr word */
"	add		%8, %3, %3\n"	/* add 16-bit proto + len */
"	add		%%r19, %0, %0\n"
"	ldd,ma		8(%1), %%r21\n"	/* 2nd saddr */
"	ldd,ma		8(%2), %%r22\n"	/* 2nd daddr */
"	add,dc		%%r20, %0, %0\n"
"	add,dc		%%r21, %0, %0\n"
"	add,dc		%%r22, %0, %0\n"
"	add,dc		%3, %0, %0\n"	/* fold in proto+len | carry bit */
"	extrd,u		%0, 31, 32, %%r19\n"	/* copy upper half down */
"	depdi		0, 31, 32, %0\n"	/* clear upper half */
"	add		%%r19, %0, %0\n"	/* fold into 32-bits */
"	addc		0, %0, %0\n"		/* add carry */

#else

	/*
	** For PA 1.x, the insn order doesn't matter as much.
	** The insn stream is serialized on the carry bit here too:
	** each addc must wait for the result of the previous
	** operation (e.g. r0 + x).
	*/

"	ldw,ma		4(%1), %%r19\n"	/* get 1st saddr word */
"	ldw,ma		4(%2), %%r20\n"	/* get 1st daddr word */
"	add		%8, %3, %3\n"	/* add 16-bit proto + len */
"	add		%%r19, %0, %0\n"
"	ldw,ma		4(%1), %%r21\n"	/* 2nd saddr */
"	addc		%%r20, %0, %0\n"
"	ldw,ma		4(%2), %%r22\n"	/* 2nd daddr */
"	addc		%%r21, %0, %0\n"
"	ldw,ma		4(%1), %%r19\n"	/* 3rd saddr */
"	addc		%%r22, %0, %0\n"
"	ldw,ma		4(%2), %%r20\n"	/* 3rd daddr */
"	addc		%%r19, %0, %0\n"
"	ldw,ma		4(%1), %%r21\n"	/* 4th saddr */
"	addc		%%r20, %0, %0\n"
"	ldw,ma		4(%2), %%r22\n"	/* 4th daddr */
"	addc		%%r21, %0, %0\n"
"	addc		%%r22, %0, %0\n"
"	addc		%3, %0, %0\n"	/* fold in proto+len, catch carry */

#endif
	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
	: "r19", "r20", "r21", "r22", "memory");
	return csum_fold(sum);
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
					       void __user *dst,
					       int len, __wsum sum,
					       int *err_ptr)
{
	/* code stolen from include/asm-mips64 */
	sum = csum_partial(src, len, sum);

	if (copy_to_user(dst, src, len)) {
		*err_ptr = -EFAULT;
		return (__force __wsum)-1;
	}

	return sum;
}

#endif /* _PARISC_CHECKSUM_H */