#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H

/* checksum.h: IP/UDP/TCP checksum routines on the V9.
 *
 * Copyright(C) 1995 Linus Torvalds
 * Copyright(C) 1995 Miguel de Icaza
 * Copyright(C) 1996 David S. Miller
 * Copyright(C) 1996 Eddie C. Dost
 * Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Alpha checksum c-code
 *      ix86 inline assembly
 *      RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <asm/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void * buff, int len, __wsum sum);

/* the same as csum_partial, but copies from user space while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum);

/* Low-level copy+checksum from user space; returns the running
 * checksum, or a negative value if the user access faulted.
 */
long __csum_partial_copy_from_user(const void __user *src,
				   void *dst, int len,
				   __wsum sum);

/* Copy len bytes from user space into dst while checksumming.
 * On a fault, *err is set to -EFAULT; on success *err is left
 * untouched (callers are expected to have pre-cleared it).
 * The (negative) fault return is still force-cast to __wsum,
 * matching the generic kernel API for this helper.
 */
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
			    void *dst, int len,
			    __wsum sum, int *err)
{
	long ret = __csum_partial_copy_from_user(src, dst, len, sum);
	if (ret < 0)
		*err = -EFAULT;
	return (__force __wsum) ret;
}

/*
 *	Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER

/* Low-level copy+checksum to user space; returns the running
 * checksum, or a negative value if the user access faulted.
 */
long __csum_partial_copy_to_user(const void *src,
				 void __user *dst, int len,
				 __wsum sum);

/* Mirror of csum_partial_copy_from_user for the kernel->user
 * direction: *err becomes -EFAULT on a fault, is untouched on
 * success.
 */
static inline __wsum
csum_and_copy_to_user(const void *src,
		      void __user *dst, int len,
		      __wsum sum, int *err)
{
	long ret = __csum_partial_copy_to_user(src, dst, len, sum);
	if (ret < 0)
		*err = -EFAULT;
	return (__force __wsum) ret;
}

/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
 * the majority of the time.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/* Fold a partial checksum without adding pseudo headers.
 *
 * Adds (sum << 16) to sum so the two 16-bit halves are summed in the
 * high half, shifts the result down, folds the final carry back in
 * (end-around carry), then complements it: xnor with %g0 is bitwise
 * NOT on SPARC.  Returns the 16-bit one's-complement checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	__asm__ __volatile__(
"	addcc		%0, %1, %1\n"
"	srl		%1, 16, %1\n"
"	addc		%1, %%g0, %1\n"
"	xnor		%%g0, %1, %0\n"
	: "=&r" (sum), "=r" (tmp)
	: "0" (sum), "1" ((__force u32)sum<<16)
	: "cc");
	return (__force __sum16)sum;
}

/* Accumulate the TCP/UDP pseudo-header (saddr, daddr, proto+len)
 * into sum with 32-bit one's-complement arithmetic: the addccc
 * chain propagates the carry, and the final addc folds the last
 * carry back into the result.  Not folded to 16 bits here.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned int len,
					unsigned short proto,
					__wsum sum)
{
	__asm__ __volatile__(
"	addcc		%1, %0, %0\n"
"	addccc		%2, %0, %0\n"
"	addccc		%3, %0, %0\n"
"	addc		%0, %%g0, %0\n"
	: "=r" (sum), "=r" (saddr)
	: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
	: "cc");
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

#define _HAVE_ARCH_IPV6_CSUM

/* IPv6 pseudo-header checksum: sums len, proto, sum and the eight
 * 32-bit words of the two in6_addr operands with a single carry
 * chain (loads are interleaved with the adds), folds the final
 * carry, then csum_fold()s down to 16 bits.
 *
 * NOTE(review): explicitly clobbers %g2, %g3 and %g7 as scratch,
 * per the asm clobber list — callers must not rely on those
 * globals surviving this call.
 */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, unsigned short proto,
				      __wsum sum)
{
	__asm__ __volatile__ (
"	addcc		%3, %4, %%g7\n"
"	addccc		%5, %%g7, %%g7\n"
"	lduw		[%2 + 0x0c], %%g2\n"
"	lduw		[%2 + 0x08], %%g3\n"
"	addccc		%%g2, %%g7, %%g7\n"
"	lduw		[%2 + 0x04], %%g2\n"
"	addccc		%%g3, %%g7, %%g7\n"
"	lduw		[%2 + 0x00], %%g3\n"
"	addccc		%%g2, %%g7, %%g7\n"
"	lduw		[%1 + 0x0c], %%g2\n"
"	addccc		%%g3, %%g7, %%g7\n"
"	lduw		[%1 + 0x08], %%g3\n"
"	addccc		%%g2, %%g7, %%g7\n"
"	lduw		[%1 + 0x04], %%g2\n"
"	addccc		%%g3, %%g7, %%g7\n"
"	lduw		[%1 + 0x00], %%g3\n"
"	addccc		%%g2, %%g7, %%g7\n"
"	addccc		%%g3, %%g7, %0\n"
"	addc		0, %0, %0\n"
	: "=&r" (sum)
	: "r" (saddr), "r" (daddr), "r"(htonl(len)),
	  "r"(htonl(proto)), "r"(sum)
	: "g2", "g3", "g7", "cc");

	return csum_fold(sum);
}

/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD

/* One's-complement 32-bit add: addcc sets the carry, addx (the
 * SPARC add-with-carry mnemonic) folds it back into the result.
 */
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	__asm__ __volatile__(
		"addcc   %0, %1, %0\n"
		"addx    %0, %%g0, %0"
		: "=r" (csum)
		: "r" (addend), "0" (csum));

	return csum;
}

#endif /* !(__SPARC64_CHECKSUM_H) */