/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMOP_H
#define MEMOP_H

#include "qemu/host-utils.h"

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_128   = 4,
    MO_256   = 5,
    MO_512   = 6,
    MO_1024  = 7,
    MO_SIZE  = 0x07,   /* Mask for the above.  */
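    /*
     * For example, the size field holds log2 of the access size in
     * bytes: a 4-byte access is MO_32, and (MO_32 & MO_SIZE) == 2
     * recovers it, so memop_size(MO_32) == (1 << 2) == 4.
     */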

    MO_SIGN  = 0x08,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 0x10,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
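    /*
     * For example, on a little-endian host MO_BE equals MO_BSWAP (a
     * big-endian access needs a byte swap) while MO_LE is 0 (a no-op);
     * on a big-endian host the assignments are reversed.
     */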
#ifdef NEED_CPU_H
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif
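    /*
     * MO_TE therefore resolves to the guest's byte order: a big-endian
     * target sees MO_TE == MO_BE, a little-endian target MO_TE == MO_LE.
     */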

    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines
     * TARGET_ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need an address that is aligned
     * to a size larger than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address that is aligned,
     * but less strictly than its natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes).
     */
    MO_ASHIFT = 5,
    MO_AMASK = 0x7 << MO_ASHIFT,
#ifdef NEED_CPU_H
#ifdef TARGET_ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
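    /*
     * For example, MO_ALIGN_16 | MO_32 describes a 4-byte access whose
     * address must be 16-byte aligned, whereas MO_ALIGN | MO_32 requires
     * only the natural 4-byte alignment.
     */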

    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

#ifdef NEED_CPU_H
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,
#endif
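    /*
     * For example, a 32-bit unsigned load in the guest's byte order is
     * MO_TEUL; MO_LEUL and MO_BEUL pin the byte order explicitly,
     * independent of the target.
     */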

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;

/* MemOp to size in bytes.  */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}
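
/*
 * For example, memop_size(MO_LEUW) == 2: masking with MO_SIZE discards
 * the endian and sign bits and keeps only the log2 size.
 */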

/* Size in bytes to MemOp.  */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2 up to 8.  */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return ctz32(size);
}
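
/*
 * For example, size_memop(4) == MO_32, since ctz32(4) == 2.  The two
 * helpers round-trip: memop_size(size_memop(n)) == n for any
 * power-of-two n up to 8.
 */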

/* Big endianness from MemOp.  */
static inline bool memop_big_endian(MemOp op)
{
    return (op & MO_BSWAP) == MO_BE;
}
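
/*
 * For example, memop_big_endian(MO_BEUL) is true on any host: MO_BE is
 * defined relative to the host's byte order above, so comparing the
 * MO_BSWAP bit against MO_BE already accounts for any needed swap.
 */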

#endif
139