/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MEMOP_H
#define MEMOP_H

#include "qemu/host-utils.h"

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_128   = 4,
    MO_256   = 5,
    MO_512   = 6,
    MO_1024  = 7,
    MO_SIZE  = 0x07,   /* Mask for the above. */

    MO_SIGN  = 0x08,   /* Sign-extended, otherwise zero-extended. */

    MO_BSWAP = 0x10,   /* Host reverse endian. */
#if HOST_BIG_ENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef COMPILING_PER_TARGET
#if TARGET_BIG_ENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif

    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     *
     * Some architectures (e.g. ARMv8) need an address that is aligned
     * to a size greater than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address that is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes).
     */
    MO_ASHIFT = 5,
    MO_AMASK  = 0x7 << MO_ASHIFT,
    MO_UNALN    = 0,
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
    MO_ALIGN    = MO_AMASK,

    /*
     * MO_ATOM_* describes the atomicity requirements of the operation:
     * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
     *     is aligned; if unaligned there is no atomicity.
     * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
     *     be a pair of half-sized operations which are packed together
     *     for convenience, with single-copy atomicity on each half if
     *     the half is aligned.
     *     This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
     * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
     *     is unaligned, so long as it does not cross a 16-byte boundary;
     *     if it crosses a 16-byte boundary there is no atomicity.
     *     This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
     * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic
     *     if it happens to be within a 16-byte boundary, otherwise it
     *     devolves to a pair of half-sized MO_ATOM_WITHIN16 operations.
     *     Depending on alignment, one or both will be single-copy atomic.
     *     This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
     * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts,
     *     as determined by the alignment.  E.g. if the address is 0 mod 4,
     *     then each 4-byte subobject is single-copy atomic.
     *     This is the atomicity e.g. of IBM Power.
     * MO_ATOM_NONE: the operation has no atomicity requirements.
     *
     * Note the default (i.e. 0) value is single-copy atomic to the
     * size of the operation, if aligned.  This retains the behaviour
     * from before this field was introduced.
     */
    MO_ATOM_SHIFT         = 8,
    MO_ATOM_IFALIGN       = 0 << MO_ATOM_SHIFT,
    MO_ATOM_IFALIGN_PAIR  = 1 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16      = 2 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
    MO_ATOM_SUBALIGN      = 4 << MO_ATOM_SHIFT,
    MO_ATOM_NONE          = 5 << MO_ATOM_SHIFT,
    MO_ATOM_MASK          = 7 << MO_ATOM_SHIFT,

    /* Combinations of the above, for ease of use. */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_UQ    = MO_64,
    MO_UO    = MO_128,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_SQ    = MO_SIGN | MO_64,
    MO_SO    = MO_SIGN | MO_128,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LEUQ  = MO_LE | MO_UQ,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LESQ  = MO_LE | MO_SQ,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BEUQ  = MO_BE | MO_UQ,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BESQ  = MO_BE | MO_SQ,

#ifdef COMPILING_PER_TARGET
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TEUQ  = MO_TE | MO_UQ,
    MO_TEUO  = MO_TE | MO_UO,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TESQ  = MO_TE | MO_SQ,
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;
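
/*
 * Usage sketch (illustrative only): a MemOp is built by OR-ing together
 * exactly one size, plus optional sign, endianness, alignment and
 * atomicity flags.  For example, a little-endian, sign-extended, 4-byte
 * load that must be 2-byte aligned could be expressed as:
 *
 *     MemOp op = MO_LESL | MO_ALIGN_2;
 *
 * which is the same value as MO_LE | MO_SIGN | MO_32 | MO_ALIGN_2.
 */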

/* MemOp to size in bytes. */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}

/* Size in bytes to MemOp. */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2, between 1 and 8. */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return (MemOp)ctz32(size);
}

/**
 * memop_alignment_bits:
 * @memop: MemOp value
 *
 * Extract the alignment requirement from @memop, as the log2 of the
 * required alignment in bytes.  A result of 0 means no alignment is
 * required.
 */
static inline unsigned memop_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required. */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement. */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement. */
        a = a >> MO_ASHIFT;
    }
    return a;
}

/**
 * memop_atomicity_bits:
 * @memop: MemOp value
 *
 * Extract the unit of atomicity from @memop, as the log2 of the size
 * in bytes (i.e. an MO_SIZE value).
 */
static inline unsigned memop_atomicity_bits(MemOp memop)
{
    unsigned size = memop & MO_SIZE;

    switch (memop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
        size = MO_8;
        break;
    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        size = size ? size - 1 : 0;
        break;
    default:
        break;
    }
    return size;
}

#endif /* MEMOP_H */
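
/*
 * Worked example (illustrative only; the values follow from the
 * definitions above).  For a 16-byte load-pair with FEAT_LSE2-style
 * atomicity:
 *
 *     MemOp op = MO_128 | MO_ALIGN_16 | MO_ATOM_WITHIN16_PAIR;
 *
 *     memop_size(op)           == 16      (1 << MO_128)
 *     memop_alignment_bits(op) == 4       (i.e. 16-byte alignment)
 *     memop_atomicity_bits(op) == MO_64   (each 8-byte half is the
 *                                          unit of atomicity)
 *
 * memop_size() and size_memop() are inverses for sizes the assert
 * permits, e.g. size_memop(memop_size(MO_UL)) == MO_UL.
 */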