/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H

/*
 * linux/byteorder/generic.h
 * Generic Byte-reordering support
 *
 * The "... p" macros, like le64_to_cpup, can be used with pointers
 * to unaligned data, but there will be a performance penalty on
 * some architectures.  Use get_unaligned for unaligned data.
 *
 * Francois-Rene Rideau <fare@tunes.org> 19970707
 *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
 *    cleaned them up.
 *    I hope it is compliant with non-GCC compilers.
 *    I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
 *    because I wasn't sure it would be ok to put it in types.h
 *    Upgraded it to 2.1.43
 * Francois-Rene Rideau <fare@tunes.org> 19971012
 *    Upgraded it to 2.1.57
 *    to please Linus T., replaced huge #ifdef's between little/big endian
 *    by nestedly #include'd files.
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    Made it to 2.1.71; now a facelift:
 *    Put files under include/linux/byteorder/
 *    Split swab from generic support.
 *
 * TODO:
 *   = Regular kernel maintainers could also replace all these manual
 *    byteswap macros that remain, disseminated among drivers,
 *    after some grep of the sources...
 *   = Linus might want to rename all these macros and files to fit his taste,
 *    to fit his personal naming scheme.
 *   = it seems that a few drivers would also appreciate
 *    nybble swapping support...
 *   = every architecture could add their byteswap macro in asm/byteorder.h
 *    see how some architectures already do (i386, alpha, ppc, etc)
 *   = cpu_to_beXX and beXX_to_cpu might some day need to be well
 *    distinguished throughout the kernel. This is not the case currently,
 *    since little endian, big endian, and pdp endian machines don't need it.
 *    But this might be the case for, say, a port of Linux to 20/21 bit
 *    architectures (any F21 Linux addicts around?).
 */
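/*
 * Illustrative sketch of the aligned vs. unaligned access note above:
 * le32_to_cpup() expects a naturally aligned pointer, while
 * get_unaligned_le32() (from <asm/unaligned.h>) is safe for unaligned
 * buffers.  The function name below is hypothetical.
 *
 *	#include <asm/unaligned.h>
 *
 *	static u32 read_le32_field(const void *buf)
 *	{
 *		if (IS_ALIGNED((unsigned long)buf, 4))
 *			return le32_to_cpup(buf);	// aligned: direct load
 *		return get_unaligned_le32(buf);		// unaligned-safe access
 *	}
 */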

/*
 * The following macros are to be defined by <asm/byteorder.h>:
 *
 * Conversion of long and short int between network and host format
 *	ntohl(__u32 x)
 *	ntohs(__u16 x)
 *	htonl(__u32 x)
 *	htons(__u16 x)
 * It seems that some programs (which? where? or perhaps a standard? POSIX?)
 * might like the above to be functions, not macros (why?).
 * If that's true, then detect them, and take measures.
 * Anyway, the measure is: define only ___ntohl as a macro instead,
 * and in a separate file, have
 * unsigned long inline ntohl(x){return ___ntohl(x);}
 *
 * The same for constant arguments
 *	__constant_ntohl(__u32 x)
 *	__constant_ntohs(__u16 x)
 *	__constant_htonl(__u32 x)
 *	__constant_htons(__u16 x)
 *
 * Conversion of XX-bit integers (16-, 32- or 64-bit)
 * between native CPU format and little/big endian format
 * 64-bit stuff only defined for proper architectures
 *	cpu_to_[bl]eXX(__uXX x)
 *	[bl]eXX_to_cpu(__uXX x)
 *
 * The same, but takes a pointer to the value to convert
 *	cpu_to_[bl]eXXp(__uXX x)
 *	[bl]eXX_to_cpup(__uXX x)
 *
 * The same, but converts the value in situ
 *	cpu_to_[bl]eXXs(__uXX x)
 *	[bl]eXX_to_cpus(__uXX x)
 *
 * See asm-foo/byteorder.h for examples of how to provide
 * architecture-optimized versions
 *
 */
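/*
 * Illustrative sketch of the three macro families above, using a
 * hypothetical on-disk record (structure, field and function names are
 * made up):
 *
 *	struct disk_rec {
 *		__le32 magic;	// stored little endian
 *		__be16 port;	// stored big endian
 *	};
 *
 *	static void fill_rec(struct disk_rec *r, u32 magic, u16 port)
 *	{
 *		r->magic = cpu_to_le32(magic);		// value form
 *		r->port = cpu_to_be16(port);
 *	}
 *
 *	static u32 rec_magic(const struct disk_rec *r)
 *	{
 *		return le32_to_cpup(&r->magic);		// pointer form
 *	}
 *
 *	static void fix_words(u32 *raw, int n)
 *	{
 *		while (n--)
 *			le32_to_cpus(raw++);		// in-situ form
 *	}
 */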

#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus

/*
 * These have to be macros in order to do the constant folding
 * correctly - if the argument is passed into an inline function
 * it is no longer constant according to gcc.
 */

#undef ntohl
#undef ntohs
#undef htonl
#undef htons

#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)

#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
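
/*
 * Illustrative sketch: htons()/htonl() expand to __cpu_to_be16/32 and so
 * fold to compile-time constants when given constant arguments.  The
 * header structure and port numbers below are hypothetical.
 *
 *	struct udp_ports {
 *		__be16 src;
 *		__be16 dst;
 *	};
 *
 *	static void set_ports(struct udp_ports *p)
 *	{
 *		p->src = htons(1024);	// constant, folded at compile time
 *		p->dst = htons(53);
 *	}
 *
 *	static u16 dst_port(const struct udp_ports *p)
 *	{
 *		return ntohs(p->dst);
 *	}
 */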

static inline void le16_add_cpu(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

static inline void le32_add_cpu(__le32 *var, u32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}

static inline void le64_add_cpu(__le64 *var, u64 val)
{
	*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
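
/*
 * Illustrative sketch: the le*_add_cpu() helpers bump a stored
 * little-endian counter without open-coding the two conversions.
 * The structure and field names are hypothetical.
 *
 *	struct sb_counters {
 *		__le32 free_blocks;	// little endian on disk
 *	};
 *
 *	static void release_blocks(struct sb_counters *sb, u32 n)
 *	{
 *		le32_add_cpu(&sb->free_blocks, n);
 *		// equivalent to:
 *		// sb->free_blocks =
 *		//	cpu_to_le32(le32_to_cpu(sb->free_blocks) + n);
 *	}
 */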

/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
	while (words--) {
		__le32_to_cpus(buf);
		buf++;
	}
}

static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
	while (words--) {
		__cpu_to_le32s(buf);
		buf++;
	}
}
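
/*
 * Illustrative sketch: converting a whole buffer of little-endian words
 * in place, e.g. a table read from a device.  Buffer name and size are
 * arbitrary.
 *
 *	u32 desc[16];
 *
 *	// after copying raw LE data into desc[]:
 *	le32_to_cpu_array(desc, ARRAY_SIZE(desc));	// now in CPU order
 *	// ... use or modify desc[] ...
 *	cpu_to_le32_array(desc, ARRAY_SIZE(desc));	// back to LE for writeback
 */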

static inline void be16_add_cpu(__be16 *var, u16 val)
{
	*var = cpu_to_be16(be16_to_cpu(*var) + val);
}

static inline void be32_add_cpu(__be32 *var, u32 val)
{
	*var = cpu_to_be32(be32_to_cpu(*var) + val);
}

static inline void be64_add_cpu(__be64 *var, u64 val)
{
	*var = cpu_to_be64(be64_to_cpu(*var) + val);
}

static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
}
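
/*
 * Illustrative sketch: unlike the in-place le32 array helpers above,
 * these copy between separate buffers, e.g. exporting a CPU-endian hash
 * state as a big-endian digest.  Names and sizes are hypothetical.
 *
 *	u32 state[8];		// CPU-endian working state
 *	__be32 digest[8];	// big-endian output
 *
 *	cpu_to_be32_array(digest, state, 8);
 *	// and back again:
 *	be32_to_cpu_array(state, digest, 8);
 */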

#endif /* _LINUX_BYTEORDER_GENERIC_H */