1 #ifndef BSWAP_H
2 #define BSWAP_H
3
4 #include "qemu/target-info.h"
5
/*
 * Map bswap16/32/64 onto the compiler builtins.  The #undef comes first
 * because some system headers (e.g. BSD <sys/endian.h>) may already
 * define macros with these names.
 */
#undef bswap16
#define bswap16(_x) __builtin_bswap16(_x)
#undef bswap32
#define bswap32(_x) __builtin_bswap32(_x)
#undef bswap64
#define bswap64(_x) __builtin_bswap64(_x)
12
/*
 * Byte-reverse the low 24 bits of @x.  Bits 24..31 of the input are
 * ignored and bits 24..31 of the result are always zero.
 */
static inline uint32_t bswap24(uint32_t x)
{
    uint32_t lo  = x & 0x000000ffU;         /* becomes the high byte */
    uint32_t mid = x & 0x0000ff00U;         /* stays in place */
    uint32_t hi  = (x >> 16) & 0x000000ffU; /* becomes the low byte */

    return (lo << 16) | mid | hi;
}
19
/* In-place byte swap of the 16-bit value pointed to by @s. */
static inline void bswap16s(uint16_t *s)
{
    uint16_t val = *s;

    *s = __builtin_bswap16(val);
}
24
/*
 * In-place byte swap of the low 24 bits of *@s.  The top byte of the
 * input is discarded; the result always has bits 24..31 clear.
 */
static inline void bswap24s(uint32_t *s)
{
    uint32_t v = *s;

    *s = ((v & 0x000000ffU) << 16)
       |  (v & 0x0000ff00U)
       | ((v >> 16) & 0x000000ffU);
}
29
/* In-place byte swap of the 32-bit value pointed to by @s. */
static inline void bswap32s(uint32_t *s)
{
    uint32_t val = *s;

    *s = __builtin_bswap32(val);
}
34
/* In-place byte swap of the 64-bit value pointed to by @s. */
static inline void bswap64s(uint64_t *s)
{
    uint64_t val = *s;

    *s = __builtin_bswap64(val);
}
39
/*
 * Conditional swap helpers, private to this header (#undef'd further
 * down after the last use):
 *   {be,le}_bswap(v, size)  - value form: convert @v between host order
 *                             and big/little endian (size: 16/32/64).
 *   {be,le}_bswap24(v)      - 24-bit value form.
 *   {be,le}_bswaps(p, size) - in-place form: swap *@p if needed.
 * When the requested byte order matches the host order the macros
 * expand to nothing (or to the value unchanged); glue() pastes the size
 * onto __builtin_bswap to select the matching builtin.
 */
#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
#define be_bswap24(v) (v)
#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) \
            do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
#define be_bswap24(v) bswap24(v)
#define le_bswap24(v) (v)
#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) \
            do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#endif
57
58 /**
59 * Endianness conversion functions between host cpu and specified endianness.
60 * (We list the complete set of prototypes produced by the macros below
61 * to assist people who search the headers to find their definitions.)
62 *
63 * uint16_t le16_to_cpu(uint16_t v);
64 * uint32_t le32_to_cpu(uint32_t v);
65 * uint64_t le64_to_cpu(uint64_t v);
66 * uint16_t be16_to_cpu(uint16_t v);
67 * uint32_t be32_to_cpu(uint32_t v);
68 * uint64_t be64_to_cpu(uint64_t v);
69 *
70 * Convert the value @v from the specified format to the native
71 * endianness of the host CPU by byteswapping if necessary, and
72 * return the converted value.
73 *
74 * uint16_t cpu_to_le16(uint16_t v);
75 * uint32_t cpu_to_le32(uint32_t v);
76 * uint64_t cpu_to_le64(uint64_t v);
77 * uint16_t cpu_to_be16(uint16_t v);
78 * uint32_t cpu_to_be32(uint32_t v);
79 * uint64_t cpu_to_be64(uint64_t v);
80 *
81 * Convert the value @v from the native endianness of the host CPU to
82 * the specified format by byteswapping if necessary, and return
83 * the converted value.
84 *
85 * void le16_to_cpus(uint16_t *v);
86 * void le32_to_cpus(uint32_t *v);
87 * void le64_to_cpus(uint64_t *v);
88 * void be16_to_cpus(uint16_t *v);
89 * void be32_to_cpus(uint32_t *v);
90 * void be64_to_cpus(uint64_t *v);
91 *
92 * Do an in-place conversion of the value pointed to by @v from the
93 * specified format to the native endianness of the host CPU.
94 *
95 * void cpu_to_le16s(uint16_t *v);
96 * void cpu_to_le32s(uint32_t *v);
97 * void cpu_to_le64s(uint64_t *v);
98 * void cpu_to_be16s(uint16_t *v);
99 * void cpu_to_be32s(uint32_t *v);
100 * void cpu_to_be64s(uint64_t *v);
101 *
102 * Do an in-place conversion of the value pointed to by @v from the
103 * native endianness of the host CPU to the specified format.
104 *
 * Both X_to_cpu() and cpu_to_X() perform the same operation; use
 * whichever name better documents the operation your code is
 * performing.
108 *
109 * Do not use these functions for conversion of values which are in guest
110 * memory, since the data may not be sufficiently aligned for the host CPU's
111 * load and store instructions. Instead you should use the ld*_p() and
112 * st*_p() functions, which perform loads and stores of data of any
113 * required size and endianness and handle possible misalignment.
114 */
115
/*
 * CPU_CONVERT(endian, size, type) instantiates the four conversion
 * functions documented above for one endianness/size pair:
 * the value forms <endian><size>_to_cpu() / cpu_to_<endian><size>()
 * and the in-place forms <endian><size>_to_cpus() /
 * cpu_to_<endian><size>s().  Both directions expand to the same swap,
 * since byte swapping is its own inverse.
 */
#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}
136
/* Instantiate the conversion functions for every endianness/size pair. */
CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)

#undef CPU_CONVERT
146
147 /*
148 * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
149 * a compile-time constant if you pass in a constant. So this can be
150 * used to initialize static variables.
151 */
#if HOST_BIG_ENDIAN
/*
 * Big-endian host: spell out the swap with shifts and masks only, so
 * that a constant argument folds to an integer constant expression
 * (usable in static initializers, unlike the builtins on some
 * compilers).
 */
# define const_le64(_x)                          \
    ((((_x) & 0x00000000000000ffULL) << 56) |    \
     (((_x) & 0x000000000000ff00ULL) << 40) |    \
     (((_x) & 0x0000000000ff0000ULL) << 24) |    \
     (((_x) & 0x00000000ff000000ULL) <<  8) |    \
     (((_x) & 0x000000ff00000000ULL) >>  8) |    \
     (((_x) & 0x0000ff0000000000ULL) >> 24) |    \
     (((_x) & 0x00ff000000000000ULL) >> 40) |    \
     (((_x) & 0xff00000000000000ULL) >> 56))
# define const_le32(_x)                          \
    ((((_x) & 0x000000ffU) << 24) |              \
     (((_x) & 0x0000ff00U) <<  8) |              \
     (((_x) & 0x00ff0000U) >>  8) |              \
     (((_x) & 0xff000000U) >> 24))
# define const_le16(_x)                          \
    ((((_x) & 0x00ff) << 8) |                    \
     (((_x) & 0xff00) >> 8))
#else
/* Little-endian host: values are already in little-endian order. */
# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
175
176 /* unaligned/endian-independent pointer access */
177
178 /*
179 * the generic syntax is:
180 *
181 * load: ld{type}{sign}{size}_{endian}_p(ptr)
182 *
183 * store: st{type}{size}_{endian}_p(ptr, val)
184 *
185 * Note there are small differences with the softmmu access API!
186 *
187 * type is:
188 * (empty): integer access
189 * f : float access
190 *
191 * sign is:
192 * (empty): for 32 or 64 bit sizes (including floats and doubles)
193 * u : unsigned
194 * s : signed
195 *
196 * size is:
197 * b: 8 bits
198 * w: 16 bits
199 * 24: 24 bits
200 * l: 32 bits
201 * q: 64 bits
202 *
203 * endian is:
204 * he : host endian
205 * be : big endian
206 * le : little endian
207 * te : target endian
208 * (except for byte accesses, which have no endian infix).
209 *
210 * In all cases these functions take a host pointer.
211 * For accessors that take a guest address rather than a
212 * host address, see the cpu_{ld,st}_* accessors defined in
213 * cpu_ldst.h.
214 *
215 * For cases where the size to be used is not fixed at compile time,
216 * there are
217 * stn_{endian}_p(ptr, sz, val)
218 * which stores @val to @ptr as an @endian-order number @sz bytes in size
219 * and
220 * ldn_{endian}_p(ptr, sz)
221 * which loads @sz bytes from @ptr as an unsigned @endian-order number
222 * and returns it in a uint64_t.
223 */
224
/* Load an unsigned byte from @ptr, zero-extended to int. */
static inline int ldub_p(const void *ptr)
{
    const uint8_t *p = ptr;

    return *p;
}
229
/* Load a signed byte from @ptr, sign-extended to int. */
static inline int ldsb_p(const void *ptr)
{
    const int8_t *p = ptr;

    return *p;
}
234
/* Store the byte @v at @ptr. */
static inline void stb_p(void *ptr, uint8_t v)
{
    uint8_t *p = ptr;

    *p = v;
}
239
240 /*
241 * Any compiler worth its salt will turn these memcpy into native unaligned
242 * operations. Thus we don't need to play games with packed attributes, or
243 * inline byte-by-byte stores.
244 * Some compilation environments (eg some fortify-source implementations)
245 * may intercept memcpy() in a way that defeats the compiler optimization,
246 * though, so we use __builtin_memcpy() to give ourselves the best chance
247 * of good performance.
248 */
249
/* Load a host-endian 16-bit value from (possibly unaligned) @ptr. */
static inline int lduw_he_p(const void *ptr)
{
    uint16_t val;

    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
256
/* Load a host-endian 16-bit value from @ptr, sign-extended to int. */
static inline int ldsw_he_p(const void *ptr)
{
    int16_t val;

    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
263
/* Store @v at (possibly unaligned) @ptr in host byte order. */
static inline void stw_he_p(void *ptr, uint16_t v)
{
    uint16_t val = v;

    __builtin_memcpy(ptr, &val, sizeof(val));
}
268
/*
 * Store a 24-bit value at @ptr: copies the three lowest-addressed bytes
 * of @v's object representation.
 * NOTE(review): on a little-endian host these are the low 24 bits of
 * @v; on a big-endian host they would be the top three bytes instead —
 * confirm the le/be wrappers account for that on BE hosts.
 */
static inline void st24_he_p(void *ptr, uint32_t v)
{
    uint32_t val = v;

    __builtin_memcpy(ptr, &val, 3);
}
273
/* Load a host-endian 32-bit value from (possibly unaligned) @ptr. */
static inline int ldl_he_p(const void *ptr)
{
    int32_t val;

    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
280
/* Store @v at (possibly unaligned) @ptr in host byte order. */
static inline void stl_he_p(void *ptr, uint32_t v)
{
    uint32_t val = v;

    __builtin_memcpy(ptr, &val, sizeof(val));
}
285
/* Load a host-endian 64-bit value from (possibly unaligned) @ptr. */
static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t val;

    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
292
/* Store @v at (possibly unaligned) @ptr in host byte order. */
static inline void stq_he_p(void *ptr, uint64_t v)
{
    uint64_t val = v;

    __builtin_memcpy(ptr, &val, sizeof(val));
}
297
/* Load a little-endian 16-bit value from @ptr, zero-extended. */
static inline int lduw_le_p(const void *ptr)
{
    uint16_t host = lduw_he_p(ptr);

    return (uint16_t)le_bswap(host, 16);
}
302
/* Load a little-endian 16-bit value from @ptr, sign-extended. */
static inline int ldsw_le_p(const void *ptr)
{
    uint16_t host = lduw_he_p(ptr);

    return (int16_t)le_bswap(host, 16);
}
307
/* Load a little-endian 32-bit value from @ptr. */
static inline int ldl_le_p(const void *ptr)
{
    int32_t host = ldl_he_p(ptr);

    return le_bswap(host, 32);
}
312
/* Load a little-endian 64-bit value from @ptr. */
static inline uint64_t ldq_le_p(const void *ptr)
{
    uint64_t host = ldq_he_p(ptr);

    return le_bswap(host, 64);
}
317
/* Store @v at @ptr as a little-endian 16-bit value. */
static inline void stw_le_p(void *ptr, uint16_t v)
{
    uint16_t swapped = le_bswap(v, 16);

    stw_he_p(ptr, swapped);
}
322
/* Store the low 24 bits of @v at @ptr in little-endian byte order. */
static inline void st24_le_p(void *ptr, uint32_t v)
{
    uint32_t swapped = le_bswap24(v);

    st24_he_p(ptr, swapped);
}
327
/* Store @v at @ptr as a little-endian 32-bit value. */
static inline void stl_le_p(void *ptr, uint32_t v)
{
    uint32_t swapped = le_bswap(v, 32);

    stl_he_p(ptr, swapped);
}
332
/* Store @v at @ptr as a little-endian 64-bit value. */
static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint64_t swapped = le_bswap(v, 64);

    stq_he_p(ptr, swapped);
}
337
/* Load a big-endian 16-bit value from @ptr, zero-extended. */
static inline int lduw_be_p(const void *ptr)
{
    uint16_t host = lduw_he_p(ptr);

    return (uint16_t)be_bswap(host, 16);
}
342
/* Load a big-endian 16-bit value from @ptr, sign-extended. */
static inline int ldsw_be_p(const void *ptr)
{
    uint16_t host = lduw_he_p(ptr);

    return (int16_t)be_bswap(host, 16);
}
347
/* Load a big-endian 32-bit value from @ptr. */
static inline int ldl_be_p(const void *ptr)
{
    int32_t host = ldl_he_p(ptr);

    return be_bswap(host, 32);
}
352
/* Load a big-endian 64-bit value from @ptr. */
static inline uint64_t ldq_be_p(const void *ptr)
{
    uint64_t host = ldq_he_p(ptr);

    return be_bswap(host, 64);
}
357
/* Store @v at @ptr as a big-endian 16-bit value. */
static inline void stw_be_p(void *ptr, uint16_t v)
{
    uint16_t swapped = be_bswap(v, 16);

    stw_he_p(ptr, swapped);
}
362
/* Store the low 24 bits of @v at @ptr in big-endian byte order. */
static inline void st24_be_p(void *ptr, uint32_t v)
{
    uint32_t swapped = be_bswap24(v);

    st24_he_p(ptr, swapped);
}
367
/* Store @v at @ptr as a big-endian 32-bit value. */
static inline void stl_be_p(void *ptr, uint32_t v)
{
    uint32_t swapped = be_bswap(v, 32);

    stl_he_p(ptr, swapped);
}
372
/* Store @v at @ptr as a big-endian 64-bit value. */
static inline void stq_be_p(void *ptr, uint64_t v)
{
    uint64_t swapped = be_bswap(v, 64);

    stq_he_p(ptr, swapped);
}
377
/*
 * Convert a little-endian 'unsigned long' to host byte order, picking
 * the swap width from the host's long size at preprocessing time.
 */
static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
    return le_bswap(v, 32);
#elif HOST_LONG_BITS == 64
    return le_bswap(v, 64);
#else
# error Unknown sizeof long
#endif
}
388
/*
 * DO_STN_LDN_P(END) instantiates the variable-size accessors for one
 * endianness suffix END (he/le/be):
 *   stn_END_p(ptr, sz, v) - store @v at @ptr as a @sz-byte value
 *   ldn_END_p(ptr, sz)    - load an unsigned @sz-byte value from @ptr
 * @sz must be 1, 2, 4 or 8; any other size aborts via
 * g_assert_not_reached().
 */
#define DO_STN_LDN_P(END) \
    static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v)  \
    {                                                                   \
        switch (sz) {                                                   \
        case 1:                                                         \
            stb_p(ptr, v);                                              \
            break;                                                      \
        case 2:                                                         \
            stw_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        case 4:                                                         \
            stl_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        case 8:                                                         \
            stq_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        default:                                                        \
            g_assert_not_reached();                                     \
        }                                                               \
    }                                                                   \
    static inline uint64_t ldn_## END ## _p(const void *ptr, int sz)    \
    {                                                                   \
        switch (sz) {                                                   \
        case 1:                                                         \
            return ldub_p(ptr);                                         \
        case 2:                                                         \
            return lduw_ ## END ## _p(ptr);                             \
        case 4:                                                         \
            return (uint32_t)ldl_ ## END ## _p(ptr);                    \
        case 8:                                                         \
            return ldq_ ## END ## _p(ptr);                              \
        default:                                                        \
            g_assert_not_reached();                                     \
        }                                                               \
    }
425
/* Instantiate host-endian, little-endian and big-endian variants. */
DO_STN_LDN_P(he)
DO_STN_LDN_P(le)
DO_STN_LDN_P(be)

#undef DO_STN_LDN_P

/* The conditional swap helpers are private to this header. */
#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps
436
437
/*
 * Return ld{word}_{le,be}_p following target endianness.
 * Relies on the GNU named-variadic-macro extension (args...).
 */
#define LOAD_IMPL(word, args...)                    \
do {                                                \
    if (target_big_endian()) {                      \
        return glue(glue(ld, word), _be_p)(args);   \
    } else {                                        \
        return glue(glue(ld, word), _le_p)(args);   \
    }                                               \
} while (0)
447
/* Load a target-endian 16-bit value from @ptr, zero-extended. */
static inline int lduw_p(const void *ptr)
{
    if (target_big_endian()) {
        return lduw_be_p(ptr);
    }
    return lduw_le_p(ptr);
}
452
/* Load a target-endian 16-bit value from @ptr, sign-extended. */
static inline int ldsw_p(const void *ptr)
{
    if (target_big_endian()) {
        return ldsw_be_p(ptr);
    }
    return ldsw_le_p(ptr);
}
457
/* Load a target-endian 32-bit value from @ptr. */
static inline int ldl_p(const void *ptr)
{
    if (target_big_endian()) {
        return ldl_be_p(ptr);
    }
    return ldl_le_p(ptr);
}
462
/* Load a target-endian 64-bit value from @ptr. */
static inline uint64_t ldq_p(const void *ptr)
{
    if (target_big_endian()) {
        return ldq_be_p(ptr);
    }
    return ldq_le_p(ptr);
}
467
/* Load a target-endian @sz-byte (1/2/4/8) value from @ptr. */
static inline uint64_t ldn_p(const void *ptr, int sz)
{
    if (target_big_endian()) {
        return ldn_be_p(ptr, sz);
    }
    return ldn_le_p(ptr, sz);
}
472
473 #undef LOAD_IMPL
474
/*
 * Call st{word}_{le,be}_p following target endianness.
 * Relies on the GNU named-variadic-macro extension (args...).
 */
#define STORE_IMPL(word, args...)           \
do {                                        \
    if (target_big_endian()) {              \
        glue(glue(st, word), _be_p)(args);  \
    } else {                                \
        glue(glue(st, word), _le_p)(args);  \
    }                                       \
} while (0)
484
485
/* Store @v at @ptr as a target-endian 16-bit value. */
static inline void stw_p(void *ptr, uint16_t v)
{
    if (target_big_endian()) {
        stw_be_p(ptr, v);
    } else {
        stw_le_p(ptr, v);
    }
}
490
/* Store @v at @ptr as a target-endian 32-bit value. */
static inline void stl_p(void *ptr, uint32_t v)
{
    if (target_big_endian()) {
        stl_be_p(ptr, v);
    } else {
        stl_le_p(ptr, v);
    }
}
495
/* Store @v at @ptr as a target-endian 64-bit value. */
static inline void stq_p(void *ptr, uint64_t v)
{
    if (target_big_endian()) {
        stq_be_p(ptr, v);
    } else {
        stq_le_p(ptr, v);
    }
}
500
/* Store @v at @ptr as a target-endian @sz-byte (1/2/4/8) value. */
static inline void stn_p(void *ptr, int sz, uint64_t v)
{
    if (target_big_endian()) {
        stn_be_p(ptr, sz, v);
    } else {
        stn_le_p(ptr, sz, v);
    }
}
505
506 #undef STORE_IMPL
507
508 #endif /* BSWAP_H */
509