// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

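/*
 * Fold a 32-bit value into 16 bits: add the two 16-bit halves and
 * wrap any carry back into the low bits (the end-around carry that
 * the ones'-complement Internet checksum requires).
 */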
static inline unsigned short from32to16(unsigned a)
{
	unsigned short b = a >> 16;
	asm("addw %w2,%w0\n\t"
	    "adcw $0,%w0\n"
	    : "=r" (b)
	    : "0" (b), "r" (a));
	return b;
}

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time-critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 temp64 = (__force u64)sum;
	unsigned odd, result;

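	/*
	 * A buffer that starts on an odd address checksums to the
	 * byte-swapped sum of the even-aligned view: rotate the
	 * caller's partial sum by 8 bits, fold the first byte into
	 * the high half of a 16-bit word, and swap the final result
	 * back at the end of this function.
	 */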
	odd = 1 & (unsigned long) buff;
	if (unlikely(odd)) {
		if (unlikely(len == 0))
			return sum;
		temp64 = ror32((__force u32)sum, 8);
		temp64 += (*(unsigned char *)buff << 8);
		len--;
		buff++;
	}

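	/*
	 * Main loop: 64 bytes per iteration.  The leading addq may
	 * carry, each adcq propagates it, and the final adcq $0
	 * wraps the last carry back into the accumulator, yielding a
	 * 64-bit ones'-complement sum.
	 */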
	while (unlikely(len >= 64)) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq 4*8(%[src]),%[res]\n\t"
		    "adcq 5*8(%[src]),%[res]\n\t"
		    "adcq 6*8(%[src]),%[res]\n\t"
		    "adcq 7*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [src] "r" (buff)
		    : "memory");
		buff += 64;
		len -= 64;
	}

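	/*
	 * At most 63 bytes remain.  Consume them by decomposing len:
	 * at most one 32-byte, one 16-byte and one 8-byte block, each
	 * using the same add/adc-and-wrap pattern as the loop above.
	 */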
	if (len & 32) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq 2*8(%[src]),%[res]\n\t"
		    "adcq 3*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [src] "r" (buff)
		    : "memory");
		buff += 32;
	}
	if (len & 16) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq 1*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [src] "r" (buff)
		    : "memory");
		buff += 16;
	}
	if (len & 8) {
		asm("addq 0*8(%[src]),%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [src] "r" (buff)
		    : "memory");
		buff += 8;
	}
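	/*
	 * 1-7 trailing bytes: load a full 8-byte word with
	 * load_unaligned_zeropad(), which zero-fills if the read runs
	 * off the end of a mapped page, then shift left and back right
	 * to clear the high-order bytes that lie beyond len.
	 */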
	if (len & 7) {
		unsigned int shift = (8 - (len & 7)) * 8;
		unsigned long trail;

		trail = (load_unaligned_zeropad(buff) << shift) >> shift;

		asm("addq %[trail],%[res]\n\t"
		    "adcq $0,%[res]"
		    : [res] "+r" (temp64)
		    : [trail] "r" (trail));
	}
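	/* Fold the 64-bit accumulator into 32 bits with end-around carry. */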
	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
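	/*
	 * If the buffer started on an odd address, fold to 16 bits and
	 * swap the two bytes to undo the rotation applied up front.
	 */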
	if (unlikely(odd)) {
		result = from32to16(result);
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
	}
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);