/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 fast network checksum routines
 *
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Ulrich Hild        (first version)
 *               Martin Schwidefsky (heavily optimized CKSM version)
 *               D.J. Barrow        (third attempt)
 */

#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

#include <linux/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
static inline __wsum
csum_partial(const void *buff, int len, __wsum sum)
{
	/*
	 * CKSM takes an even/odd register pair: the even register holds
	 * the operand address, the odd one the remaining length.  Pin the
	 * pair to r2/r3 explicitly via register-asm variables.
	 */
	register unsigned long reg2 asm("2") = (unsigned long) buff;
	register unsigned long reg3 asm("3") = (unsigned long) len;

	asm volatile(
		"0:	cksm	%0,%1\n"	/* do checksum on longs */
		/*
		 * CKSM may complete only partially (condition code 3);
		 * branch back and resume until the whole buffer has been
		 * consumed.  reg2/reg3 are updated by the instruction,
		 * hence the "+d" (read-write) constraints.
		 */
		"	jo	0b\n"
		: "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
	return sum;
}

/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 *
 * Copy from userspace and compute checksum.
49 */ 50 static inline __wsum 51 csum_partial_copy_from_user(const void __user *src, void *dst, 52 int len, __wsum sum, 53 int *err_ptr) 54 { 55 if (unlikely(copy_from_user(dst, src, len))) 56 *err_ptr = -EFAULT; 57 return csum_partial(dst, len, sum); 58 } 59 60 61 static inline __wsum 62 csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) 63 { 64 memcpy(dst,src,len); 65 return csum_partial(dst, len, sum); 66 } 67 68 /* 69 * Fold a partial checksum without adding pseudo headers 70 */ 71 static inline __sum16 csum_fold(__wsum sum) 72 { 73 u32 csum = (__force u32) sum; 74 75 csum += (csum >> 16) + (csum << 16); 76 csum >>= 16; 77 return (__force __sum16) ~csum; 78 } 79 80 /* 81 * This is a version of ip_compute_csum() optimized for IP headers, 82 * which always checksum on 4 octet boundaries. 83 * 84 */ 85 static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 86 { 87 return csum_fold(csum_partial(iph, ihl*4, 0)); 88 } 89 90 /* 91 * computes the checksum of the TCP/UDP pseudo-header 92 * returns a 32-bit checksum 93 */ 94 static inline __wsum 95 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, 96 __wsum sum) 97 { 98 __u32 csum = (__force __u32)sum; 99 100 csum += (__force __u32)saddr; 101 if (csum < (__force __u32)saddr) 102 csum++; 103 104 csum += (__force __u32)daddr; 105 if (csum < (__force __u32)daddr) 106 csum++; 107 108 csum += len + proto; 109 if (csum < len + proto) 110 csum++; 111 112 return (__force __wsum)csum; 113 } 114 115 /* 116 * computes the checksum of the TCP/UDP pseudo-header 117 * returns a 16-bit checksum, already complemented 118 */ 119 120 static inline __sum16 121 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, 122 __wsum sum) 123 { 124 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); 125 } 126 127 /* 128 * this routine is used for miscellaneous IP-like checksums, mainly 129 * in icmp.c 130 */ 131 132 static inline __sum16 ip_compute_csum(const 
void *buff, int len) 133 { 134 return csum_fold(csum_partial(buff, len, 0)); 135 } 136 137 #endif /* _S390_CHECKSUM_H */ 138 139 140