xref: /openbmc/linux/arch/arc/include/asm/checksum.h (revision 762f99f4f3cb41a775b5157dd761217beba65873)
1d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
2ca15c8ecSVineet Gupta /*
3ca15c8ecSVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4ca15c8ecSVineet Gupta  *
5ca15c8ecSVineet Gupta  * Joern Rennecke  <joern.rennecke@embecosm.com>: Jan 2012
6ca15c8ecSVineet Gupta  *  -Insn Scheduling improvements to csum core routines.
7ca15c8ecSVineet Gupta  *      = csum_fold( ) largely derived from ARM version.
 *      = ip_fast_csum( ) to have modulo scheduling
9ca15c8ecSVineet Gupta  *  -gcc 4.4.x broke networking. Alias analysis needed to be primed.
10ca15c8ecSVineet Gupta  *   worked around by adding memory clobber to ip_fast_csum( )
11ca15c8ecSVineet Gupta  *
12ca15c8ecSVineet Gupta  * vineetg: May 2010
 *  -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
14ca15c8ecSVineet Gupta  */
15ca15c8ecSVineet Gupta 
16ca15c8ecSVineet Gupta #ifndef _ASM_ARC_CHECKSUM_H
17ca15c8ecSVineet Gupta #define _ASM_ARC_CHECKSUM_H
18ca15c8ecSVineet Gupta 
19ca15c8ecSVineet Gupta /*
20ca15c8ecSVineet Gupta  *	Fold a partial checksum
21ca15c8ecSVineet Gupta  *
22ca15c8ecSVineet Gupta  *  The 2 swords comprising the 32bit sum are added, any carry to 16th bit
23ca15c8ecSVineet Gupta  *  added back and final sword result inverted.
24ca15c8ecSVineet Gupta  */
csum_fold(__wsum s)25ca15c8ecSVineet Gupta static inline __sum16 csum_fold(__wsum s)
26ca15c8ecSVineet Gupta {
27*d4067395SJinchao Wang 	unsigned int r = s << 16 | s >> 16;	/* ror */
28ca15c8ecSVineet Gupta 	s = ~s;
29ca15c8ecSVineet Gupta 	s -= r;
30ca15c8ecSVineet Gupta 	return s >> 16;
31ca15c8ecSVineet Gupta }
32ca15c8ecSVineet Gupta 
33ca15c8ecSVineet Gupta /*
34ca15c8ecSVineet Gupta  *	This is a version of ip_compute_csum() optimized for IP headers,
35ca15c8ecSVineet Gupta  *	which always checksum on 4 octet boundaries.
36ca15c8ecSVineet Gupta  */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	/* ptr is advanced in place by the post-increment ld.ab loads below */
	const void *ptr = iph;
	unsigned int tmp, tmp2, sum;

	/*
	 * Operands: %0 = sum, %1 = tmp, %2 = tmp2, %3 = ptr, %4 = ihl.
	 * ihl is the IPv4 header length in 32-bit words (>= 5 for a
	 * valid header; the first two words are loaded unconditionally).
	 *
	 * The first two words are loaded up front and ihl - 2 words
	 * remain.  lsr.f halves that remainder into lp_count for the
	 * zero-delay loop (lp), setting carry if it was odd; in the odd
	 * case one extra word is folded in before the loop (bcc skips
	 * that when the count is even).  Each loop iteration sums two
	 * words with carry (adc.f); the trailing adc.f/add.cs pair
	 * absorbs the final word and any last carry-out.
	 *
	 * The "memory" clobber is deliberate: per the note at the top
	 * of this file, it primes gcc's alias analysis (gcc 4.4
	 * mis-optimised callers without it).
	 */
	__asm__(
	"	ld.ab  %0, [%3, 4]		\n"
	"	ld.ab  %2, [%3, 4]		\n"
	"	sub    %1, %4, 2		\n"
	"	lsr.f  lp_count, %1, 1		\n"
	"	bcc    0f			\n"
	"	add.f  %0, %0, %2		\n"
	"	ld.ab  %2, [%3, 4]		\n"
	"0:	lp     1f			\n"
	"	ld.ab  %1, [%3, 4]		\n"
	"	adc.f  %0, %0, %2		\n"
	"	ld.ab  %2, [%3, 4]		\n"
	"	adc.f  %0, %0, %1		\n"
	"1:	adc.f  %0, %0, %2		\n"
	"	add.cs %0,%0,1			\n"
	: "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
	: "r"(ihl)
	: "cc", "lp_count", "memory");

	/* Fold the 32-bit accumulator down to the final inverted 16 bits */
	return csum_fold(sum);
}
64ca15c8ecSVineet Gupta 
65ca15c8ecSVineet Gupta /*
66ca15c8ecSVineet Gupta  * TCP pseudo Header is 12 bytes:
67ca15c8ecSVineet Gupta  * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
68ca15c8ecSVineet Gupta  */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum)
{
	/*
	 * Accumulate the TCP/UDP pseudo header (layout described above)
	 * into 'sum' with a chain of carry-propagating adds; the final
	 * 'adc %0, %0, 0' folds the last carry back in so none is lost.
	 * The 32-bit result is NOT folded to 16 bits here ("nofold") —
	 * callers pass it on to csum_fold() / further summation.
	 */
	__asm__ __volatile__(
	"	add.f %0, %0, %1	\n"
	"	adc.f %0, %0, %2	\n"
	"	adc.f %0, %0, %3	\n"
	"	adc.f %0, %0, %4	\n"
	"	adc   %0, %0, 0		\n"
	: "+&r"(sum)
	: "r"(saddr), "r"(daddr),
#ifdef CONFIG_CPU_BIG_ENDIAN
	  "r"(len),
#else
	  /*
	   * On little-endian, len << 8 is congruent to htons(len)
	   * modulo 0xffff, so the eventual 16-bit fold yields the same
	   * result as summing a big-endian length field.
	   */
	  "r"(len << 8),
#endif
	  /* proto is a single byte; htons() places it in the byte the
	   * pseudo header expects next to the zero byte. */
	  "r"(htons(proto))
	: "cc");

	return sum;
}
91ca15c8ecSVineet Gupta 
92ca15c8ecSVineet Gupta #define csum_fold csum_fold
93ca15c8ecSVineet Gupta #define ip_fast_csum ip_fast_csum
94ca15c8ecSVineet Gupta #define csum_tcpudp_nofold csum_tcpudp_nofold
95ca15c8ecSVineet Gupta 
96ca15c8ecSVineet Gupta #include <asm-generic/checksum.h>
97ca15c8ecSVineet Gupta 
98ca15c8ecSVineet Gupta #endif /* _ASM_ARC_CHECKSUM_H */
99