/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H

/* checksum.h: IP/UDP/TCP checksum routines on the V9.
 *
 * Copyright(C) 1995 Linus Torvalds
 * Copyright(C) 1995 Miguel de Icaza
 * Copyright(C) 1996 David S. Miller
 * Copyright(C) 1996 Eddie C. Dost
 * Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Alpha checksum c-code
 *	ix86 inline assembly
 *	RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <linux/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void * buff, int len, __wsum sum);

/* the same as csum_partial, but copies from user space while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum);

/* Checksumming copy from user space; implemented in assembly
 * elsewhere.  Returns a negative value when the user access faults
 * (see the ret < 0 check in csum_and_copy_from_user below).
 */
long __csum_partial_copy_from_user(const void __user *src,
				   void *dst, int len,
				   __wsum sum);

/* Copy from user space while checksumming.  A faulting copy is
 * reported by storing -EFAULT through *err; on success *err is left
 * untouched and the running checksum is returned.
 */
static inline __wsum
csum_and_copy_from_user(const void __user *src,
			void *dst, int len,
			__wsum sum, int *err)
{
	long ret = __csum_partial_copy_from_user(src, dst, len, sum);

	if (ret < 0)
		*err = -EFAULT;
	return (__force __wsum) ret;
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
long __csum_partial_copy_to_user(const void *src,
				 void __user *dst, int len,
				 __wsum sum);

/* Copy to user space while checksumming; same error convention as
 * csum_and_copy_from_user (*err set to -EFAULT on a faulting access).
 */
static inline __wsum
csum_and_copy_to_user(const void *src,
		      void __user *dst, int len,
		      __wsum sum, int *err)
{
	long ret = __csum_partial_copy_to_user(src, dst, len, sum);

	if (ret < 0)
		*err = -EFAULT;
	return (__force __wsum) ret;
}

/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
 * the majority of the time.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/* Fold a partial checksum without adding pseudo headers.
 *
 * Adds sum to (sum << 16), propagates the carry into the high half,
 * shifts the high half down and complements it (xnor with %g0), giving
 * the 16-bit one's-complement fold of the 32-bit partial sum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	__asm__ __volatile__(
"	addcc	%0, %1, %1\n"
"	srl	%1, 16, %1\n"
"	addc	%1, %%g0, %1\n"
"	xnor	%%g0, %1, %0\n"
	: "=&r" (sum), "=r" (tmp)
	: "0" (sum), "1" ((__force u32)sum<<16)
	: "cc");
	return (__force __sum16)sum;
}

/* Accumulate the IPv4 pseudo-header (saddr + daddr + proto + len) into
 * sum with end-around carry, without folding to 16 bits.  The result
 * feeds csum_fold() (see csum_tcpudp_magic below).
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__ __volatile__(
"	addcc	%1, %0, %0\n"
"	addccc	%2, %0, %0\n"
"	addccc	%3, %0, %0\n"
"	addc	%0, %%g0, %0\n"
	: "=r" (sum), "=r" (saddr)
	: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
	: "cc");
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

#define _HAVE_ARCH_IPV6_CSUM

/* IPv6 pseudo-header checksum: sums len, proto, the four 32-bit words
 * of each 128-bit address (loaded with lduw, interleaved with the adds)
 * and the incoming sum, carrying throughout, then folds the result.
 * Scratch registers %g2, %g3, %g7 and the condition codes are
 * clobbered, as declared in the clobber list.
 */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	__asm__ __volatile__ (
"	addcc	%3, %4, %%g7\n"
"	addccc	%5, %%g7, %%g7\n"
"	lduw	[%2 + 0x0c], %%g2\n"
"	lduw	[%2 + 0x08], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%2 + 0x04], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%2 + 0x00], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%1 + 0x0c], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%1 + 0x08], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	lduw	[%1 + 0x04], %%g2\n"
"	addccc	%%g3, %%g7, %%g7\n"
"	lduw	[%1 + 0x00], %%g3\n"
"	addccc	%%g2, %%g7, %%g7\n"
"	addccc	%%g3, %%g7, %0\n"
"	addc	0, %0, %0\n"
	: "=&r" (sum)
	: "r" (saddr), "r" (daddr), "r"(htonl(len)),
	  "r"(htonl(proto)), "r"(sum)
	: "g2", "g3", "g7", "cc");

	return csum_fold(sum);
}

/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD
/* Add two partial checksums with end-around carry: addcc performs the
 * 32-bit add, addx folds the resulting carry back into the low bit.
 */
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	__asm__ __volatile__(
		"addcc %0, %1, %0\n"
		"addx %0, %%g0, %0"
		: "=r" (csum)
		: "r" (addend), "0" (csum));

	return csum;
}

#endif /* !(__SPARC64_CHECKSUM_H) */