#ifndef BSWAP_H
#define BSWAP_H

#undef bswap16
#define bswap16(_x) __builtin_bswap16(_x)
#undef bswap32
#define bswap32(_x) __builtin_bswap32(_x)
#undef bswap64
#define bswap64(_x) __builtin_bswap64(_x)

static inline uint32_t bswap24(uint32_t x)
{
    return (((x & 0x000000ffU) << 16) |
            ((x & 0x0000ff00U) << 0) |
            ((x & 0x00ff0000U) >> 16));
}

static inline void bswap16s(uint16_t *s)
{
    *s = __builtin_bswap16(*s);
}

static inline void bswap24s(uint32_t *s)
{
    *s = bswap24(*s & 0x00ffffffU);
}

static inline void bswap32s(uint32_t *s)
{
    *s = __builtin_bswap32(*s);
}

static inline void bswap64s(uint64_t *s)
{
    *s = __builtin_bswap64(*s);
}

#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
#define be_bswap24(v) (v)
#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) \
            do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
#define be_bswap24(v) bswap24(v)
#define le_bswap24(v) (v)
#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) \
            do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#endif
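
/*
 * Illustrative note (not part of the original interface): on a
 * little-endian host the helpers above expand roughly as
 *
 *     le_bswap(v, 32)  ->  (v)                      no-op
 *     be_bswap(v, 32)  ->  __builtin_bswap32(v)     byte swap
 *
 * and the other way round on a big-endian host.  They are internal to
 * this header and are #undef'd at the bottom of the file.
 */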

/**
 * Endianness conversion functions between the host CPU and a specified
 * endianness.
 * (We list the complete set of prototypes produced by the macros below
 * to assist people who search the headers to find their definitions.)
 *
 * uint16_t le16_to_cpu(uint16_t v);
 * uint32_t le32_to_cpu(uint32_t v);
 * uint64_t le64_to_cpu(uint64_t v);
 * uint16_t be16_to_cpu(uint16_t v);
 * uint32_t be32_to_cpu(uint32_t v);
 * uint64_t be64_to_cpu(uint64_t v);
 *
 * Convert the value @v from the specified format to the native
 * endianness of the host CPU by byteswapping if necessary, and
 * return the converted value.
 *
 * uint16_t cpu_to_le16(uint16_t v);
 * uint32_t cpu_to_le32(uint32_t v);
 * uint64_t cpu_to_le64(uint64_t v);
 * uint16_t cpu_to_be16(uint16_t v);
 * uint32_t cpu_to_be32(uint32_t v);
 * uint64_t cpu_to_be64(uint64_t v);
 *
 * Convert the value @v from the native endianness of the host CPU to
 * the specified format by byteswapping if necessary, and return
 * the converted value.
 *
 * void le16_to_cpus(uint16_t *v);
 * void le32_to_cpus(uint32_t *v);
 * void le64_to_cpus(uint64_t *v);
 * void be16_to_cpus(uint16_t *v);
 * void be32_to_cpus(uint32_t *v);
 * void be64_to_cpus(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * specified format to the native endianness of the host CPU.
 *
 * void cpu_to_le16s(uint16_t *v);
 * void cpu_to_le32s(uint32_t *v);
 * void cpu_to_le64s(uint64_t *v);
 * void cpu_to_be16s(uint16_t *v);
 * void cpu_to_be32s(uint32_t *v);
 * void cpu_to_be64s(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * native endianness of the host CPU to the specified format.
 *
 * Both X_to_cpu() and cpu_to_X() perform the same operation; use
 * whichever one better documents the operation your code is performing.
 *
 * Do not use these functions for conversion of values which are in guest
 * memory, since the data may not be sufficiently aligned for the host CPU's
 * load and store instructions. Instead you should use the ld*_p() and
 * st*_p() functions, which perform loads and stores of data of any
 * required size and endianness and handle possible misalignment.
 */

#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}

CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
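
/*
 * Usage sketch (illustrative only; the field and variable names are
 * hypothetical): preparing a little-endian value for storage and
 * reading back big-endian and little-endian fields.
 *
 *     uint32_t wire_len = cpu_to_le32(payload_len);     host -> LE
 *     uint16_t port     = be16_to_cpu(hdr_be_port);     BE -> host
 *     le64_to_cpus(&desc_addr);                         in-place LE -> host
 */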

/*
 * Same as cpu_to_le{16,32,64}, except that gcc will figure out that the
 * result is a compile-time constant if you pass in a constant.  So this
 * can be used to initialize static variables.
 */
#if HOST_BIG_ENDIAN
# define const_le64(_x)                          \
    ((((_x) & 0x00000000000000ffULL) << 56) |    \
     (((_x) & 0x000000000000ff00ULL) << 40) |    \
     (((_x) & 0x0000000000ff0000ULL) << 24) |    \
     (((_x) & 0x00000000ff000000ULL) <<  8) |    \
     (((_x) & 0x000000ff00000000ULL) >>  8) |    \
     (((_x) & 0x0000ff0000000000ULL) >> 24) |    \
     (((_x) & 0x00ff000000000000ULL) >> 40) |    \
     (((_x) & 0xff00000000000000ULL) >> 56))
# define const_le32(_x)                          \
    ((((_x) & 0x000000ffU) << 24) |              \
     (((_x) & 0x0000ff00U) <<  8) |              \
     (((_x) & 0x00ff0000U) >>  8) |              \
     (((_x) & 0xff000000U) >> 24))
# define const_le16(_x)                          \
    ((((_x) & 0x00ff) << 8) |                    \
     (((_x) & 0xff00) >> 8))
#else
# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
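
/*
 * Example (sketch): because const_le32() folds to a compile-time
 * constant, it can be used where an initializer must be constant,
 * e.g. a hypothetical on-disk magic number:
 *
 *     static const uint32_t magic_le = const_le32(0x4d495351);
 */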

/* unaligned/endian-independent pointer access */

/*
 * the generic syntax is:
 *
 * load: ld{type}{sign}{size}_{endian}_p(ptr)
 *
 * store: st{type}{size}_{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for 32 or 64 bit sizes (including floats and doubles)
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   24: 24 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 *   he : host endian
 *   be : big endian
 *   le : little endian
 *   te : target endian
 * (except for byte accesses, which have no endian infix).
 *
 * The target endian accessors are obviously only available to source
 * files which are built per-target; they are defined in cpu-all.h.
 *
 * In all cases these functions take a host pointer.
 * For accessors that take a guest address rather than a
 * host address, see the cpu_{ld,st}_* accessors defined in
 * cpu_ldst.h.
 *
 * For cases where the size to be used is not fixed at compile time,
 * there are
 *   stn_{endian}_p(ptr, sz, val)
 * which stores @val to @ptr as an @endian-order number @sz bytes in size
 * and
 *   ldn_{endian}_p(ptr, sz)
 * which loads @sz bytes from @ptr as an unsigned @endian-order number
 * and returns it in a uint64_t.
 */
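
/*
 * Naming examples (sketch; 'buf' and 'flags' are hypothetical):
 * following the scheme above, ldsw_be_p() loads a signed 16-bit
 * big-endian value, ldq_le_p() loads an unsigned 64-bit little-endian
 * value, and stl_he_p() stores a 32-bit value in host order.  Parsing
 * a little-endian header in a byte buffer might look like:
 *
 *     uint32_t len  = ldl_le_p(buf + 4);
 *     uint64_t addr = ldq_le_p(buf + 8);
 *     stw_le_p(buf + 2, flags);
 */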

static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, uint8_t v)
{
    *(uint8_t *)ptr = v;
}

/*
 * Any compiler worth its salt will turn these memcpy calls into native
 * unaligned operations.  Thus we don't need to play games with packed
 * attributes, or inline byte-by-byte stores.
 * Some compilation environments (e.g. some fortify-source implementations)
 * may intercept memcpy() in a way that defeats the compiler optimization,
 * though, so we use __builtin_memcpy() to give ourselves the best chance
 * of good performance.
 */

static inline int lduw_he_p(const void *ptr)
{
    uint16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline int ldsw_he_p(const void *ptr)
{
    int16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stw_he_p(void *ptr, uint16_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

static inline void st24_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, 3);
}

static inline int ldl_he_p(const void *ptr)
{
    int32_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stl_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stq_he_p(void *ptr, uint64_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
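
/*
 * Alignment note (sketch; 'buf' is a hypothetical byte buffer): the
 * helpers above are safe for any alignment, so a 64-bit host-order
 * load from an odd offset is simply
 *
 *     uint64_t v = ldq_he_p(buf + 1);
 *
 * whereas dereferencing (uint64_t *)(buf + 1) directly would be
 * undefined behaviour on strict-alignment hosts.
 */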

static inline int lduw_le_p(const void *ptr)
{
    return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_le_p(const void *ptr)
{
    return (int16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_le_p(const void *ptr)
{
    return le_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return le_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_le_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, le_bswap(v, 16));
}

static inline void st24_le_p(void *ptr, uint32_t v)
{
    st24_he_p(ptr, le_bswap24(v));
}

static inline void stl_le_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, le_bswap(v, 32));
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, le_bswap(v, 64));
}

static inline int lduw_be_p(const void *ptr)
{
    return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_be_p(const void *ptr)
{
    return (int16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_be_p(const void *ptr)
{
    return be_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return be_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_be_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, be_bswap(v, 16));
}

static inline void st24_be_p(void *ptr, uint32_t v)
{
    st24_he_p(ptr, be_bswap24(v));
}

static inline void stl_be_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, be_bswap(v, 32));
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, be_bswap(v, 64));
}

static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
    return le_bswap(v, 32);
#elif HOST_LONG_BITS == 64
    return le_bswap(v, 64);
#else
# error Unknown sizeof long
#endif
}

/* Store @v to @ptr as a @sz byte value in the endianness indicated by END */
#define DO_STN_LDN_P(END) \
    static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \
    { \
        switch (sz) { \
        case 1: \
            stb_p(ptr, v); \
            break; \
        case 2: \
            stw_ ## END ## _p(ptr, v); \
            break; \
        case 4: \
            stl_ ## END ## _p(ptr, v); \
            break; \
        case 8: \
            stq_ ## END ## _p(ptr, v); \
            break; \
        default: \
            g_assert_not_reached(); \
        } \
    } \
    static inline uint64_t ldn_## END ## _p(const void *ptr, int sz) \
    { \
        switch (sz) { \
        case 1: \
            return ldub_p(ptr); \
        case 2: \
            return lduw_ ## END ## _p(ptr); \
        case 4: \
            return (uint32_t)ldl_ ## END ## _p(ptr); \
        case 8: \
            return ldq_ ## END ## _p(ptr); \
        default: \
            g_assert_not_reached(); \
        } \
    }

DO_STN_LDN_P(he)
DO_STN_LDN_P(le)
DO_STN_LDN_P(be)
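
/*
 * Variable-size usage (sketch; 'regptr', 'size' and 'val' are
 * hypothetical): stn_le_p() and ldn_le_p() dispatch to the fixed-size
 * accessors at run time, e.g. for a register field of 1, 2, 4 or 8 bytes:
 *
 *     stn_le_p(regptr, size, val);
 *     uint64_t cur = ldn_le_p(regptr, size);
 */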

#undef DO_STN_LDN_P

#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps

#endif /* BSWAP_H */