xref: /openbmc/linux/arch/arc/include/uapi/asm/swab.h (revision e2be04c7)
1e2be04c7SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
28c2f4a8dSVineet Gupta /*
38c2f4a8dSVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
48c2f4a8dSVineet Gupta  *
58c2f4a8dSVineet Gupta  * This program is free software; you can redistribute it and/or modify
68c2f4a8dSVineet Gupta  * it under the terms of the GNU General Public License version 2 as
78c2f4a8dSVineet Gupta  * published by the Free Software Foundation.
88c2f4a8dSVineet Gupta  *
98c2f4a8dSVineet Gupta  * vineetg: May 2011
108c2f4a8dSVineet Gupta  *  -Support single cycle endian-swap insn in ARC700 4.10
118c2f4a8dSVineet Gupta  *
128c2f4a8dSVineet Gupta  * vineetg: June 2009
138c2f4a8dSVineet Gupta  *  -Better htonl implementation (5 instead of 9 ALU instructions)
148c2f4a8dSVineet Gupta  *  -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
158c2f4a8dSVineet Gupta  */
168c2f4a8dSVineet Gupta 
178c2f4a8dSVineet Gupta #ifndef __ASM_ARC_SWAB_H
188c2f4a8dSVineet Gupta #define __ASM_ARC_SWAB_H
198c2f4a8dSVineet Gupta 
208c2f4a8dSVineet Gupta #include <linux/types.h>
218c2f4a8dSVineet Gupta 
228c2f4a8dSVineet Gupta /* Native single cycle endian swap insn */
238c2f4a8dSVineet Gupta #ifdef CONFIG_ARC_HAS_SWAPE
248c2f4a8dSVineet Gupta 
/*
 * __arch_swab32(x) - reverse the byte order of a 32-bit value using the
 * native SWAPE instruction (ARC700 4.10+, gated by CONFIG_ARC_HAS_SWAPE).
 *
 * @x is evaluated exactly once (copied into a temporary first), so
 * arguments with side effects are safe.  The GCC statement-expression
 * yields the swapped value as the macro's result.
 */
#define __arch_swab32(x)		\
({					\
	unsigned int tmp = x;		\
	__asm__(			\
	"	swape	%0, %1	\n"	\
	: "=r" (tmp)			\
	: "r" (tmp));			\
	tmp;				\
})
348c2f4a8dSVineet Gupta 
358c2f4a8dSVineet Gupta #else
368c2f4a8dSVineet Gupta 
378c2f4a8dSVineet Gupta /* Several ways of Endian-Swap Emulation for ARC
388c2f4a8dSVineet Gupta  * 0: kernel generic
398c2f4a8dSVineet Gupta  * 1: ARC optimised "C"
408c2f4a8dSVineet Gupta  * 2: ARC Custom instruction
418c2f4a8dSVineet Gupta  */
428c2f4a8dSVineet Gupta #define ARC_BSWAP_TYPE	1
438c2f4a8dSVineet Gupta 
448c2f4a8dSVineet Gupta #if (ARC_BSWAP_TYPE == 1)		/******* Software only ********/
458c2f4a8dSVineet Gupta 
468c2f4a8dSVineet Gupta /* The kernel default implementation of htonl is
478c2f4a8dSVineet Gupta  *		return  x<<24 | x>>24 |
488c2f4a8dSVineet Gupta  *		 (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
498c2f4a8dSVineet Gupta  *
508c2f4a8dSVineet Gupta  * This generates 9 instructions on ARC (excluding the ld/st)
518c2f4a8dSVineet Gupta  *
528c2f4a8dSVineet Gupta  * 8051fd8c:	ld     r3,[r7,20]	; Mem op : Get the value to be swapped
538c2f4a8dSVineet Gupta  * 8051fd98:	asl    r5,r3,24		; get  3rd Byte
548c2f4a8dSVineet Gupta  * 8051fd9c:	lsr    r2,r3,24		; get  0th Byte
558c2f4a8dSVineet Gupta  * 8051fda0:	and    r4,r3,0xff00
568c2f4a8dSVineet Gupta  * 8051fda8:	asl    r4,r4,8		; get 1st Byte
578c2f4a8dSVineet Gupta  * 8051fdac:	and    r3,r3,0x00ff0000
588c2f4a8dSVineet Gupta  * 8051fdb4:	or     r2,r2,r5		; combine 0th and 3rd Bytes
598c2f4a8dSVineet Gupta  * 8051fdb8:	lsr    r3,r3,8		; 2nd Byte at correct place in Dst Reg
608c2f4a8dSVineet Gupta  * 8051fdbc:	or     r2,r2,r4		; combine 0,3 Bytes with 1st Byte
618c2f4a8dSVineet Gupta  * 8051fdc0:	or     r2,r2,r3		; combine 0,3,1 Bytes with 2nd Byte
628c2f4a8dSVineet Gupta  * 8051fdc4:	st     r2,[r1,20]	; Mem op : save result back to mem
638c2f4a8dSVineet Gupta  *
648c2f4a8dSVineet Gupta  * Joern suggested a better "C" algorithm which is great since
 * (1) It is portable to any architecture
668c2f4a8dSVineet Gupta  * (2) At the same time it takes advantage of ARC ISA (rotate intrns)
678c2f4a8dSVineet Gupta  */
688c2f4a8dSVineet Gupta 
/*
 * __arch_swab32(x) - reverse the byte order of a 32-bit value in 5 ALU
 * instructions, using Joern's two-rotate algorithm: with
 * t = rol32(x, 8) and r = ror32(x, 8), the only bytes that differ
 * between t and r sit at byte lanes 0 and 2, so
 * ((t ^ r) & 0x00ff00ff) ^ r is the full byte reversal.  On ARC each
 * "shl | shr" pair compiles to a single ror instruction.
 *
 * The locals are 'unsigned int' (32-bit), not 'unsigned long': the
 * rotate identities above only hold at exactly 32-bit operand width
 * (a 64-bit long would let the left shifts spill into the upper bits
 * and corrupt the result), and this also matches the 'unsigned int tmp'
 * used by the other __arch_swab32 variants in this file.
 *
 * @x is evaluated exactly once, so side-effecting arguments are safe.
 */
#define __arch_swab32(x)					\
({	unsigned int __in = (x), __tmp;				\
	__tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */	\
	__in = __in << 24 | __in >> 8; /* ror in,in,8 */	\
	__tmp ^= __in;						\
	__tmp &= 0xff00ff;					\
	__tmp ^ __in;						\
})
778c2f4a8dSVineet Gupta 
782547476aSAndrea Gelmini #elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
798c2f4a8dSVineet Gupta 
/*
 * __arch_swab32(x) - reverse the byte order of a 32-bit value via a
 * user-defined ARC extension instruction named "bswap" (opcode group 7,
 * sub-opcode 0x00, two-operand syntax), declared to the assembler with
 * the .extInstruction directive on each use.
 *
 * NOTE(review): requires hardware that actually implements this custom
 * instruction; selected only when ARC_BSWAP_TYPE == 2.
 *
 * @x is evaluated exactly once (copied into a temporary first), so
 * arguments with side effects are safe; the statement-expression yields
 * the swapped value.
 */
#define __arch_swab32(x)						\
({									\
	unsigned int tmp = x;						\
	__asm__(							\
	"	.extInstruction	bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP	\n"\
	"	bswap  %0, %1						\n"\
	: "=r" (tmp)							\
	: "r" (tmp));							\
	tmp;								\
})
908c2f4a8dSVineet Gupta 
918c2f4a8dSVineet Gupta #endif /* ARC_BSWAP_TYPE=zzz */
928c2f4a8dSVineet Gupta 
938c2f4a8dSVineet Gupta #endif /* CONFIG_ARC_HAS_SWAPE */
948c2f4a8dSVineet Gupta 
958c2f4a8dSVineet Gupta #if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
968c2f4a8dSVineet Gupta #define __SWAB_64_THRU_32__
978c2f4a8dSVineet Gupta #endif
988c2f4a8dSVineet Gupta 
998c2f4a8dSVineet Gupta #endif
100