/*
 * Decode table flags, mostly based on Intel SDM.
 *
 * Copyright (c) 2022 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Operand addressing methods; single letters follow the Intel SDM
 * appendix A conventions, multi-character names are QEMU extensions.
 */
typedef enum X86OpType {
    X86_TYPE_None,

    X86_TYPE_A, /* Implicit */
    X86_TYPE_B, /* VEX.vvvv selects a GPR */
    X86_TYPE_C, /* REG in the modrm byte selects a control register */
    X86_TYPE_D, /* REG in the modrm byte selects a debug register */
    X86_TYPE_E, /* ALU modrm operand */
    X86_TYPE_F, /* EFLAGS/RFLAGS */
    X86_TYPE_G, /* REG in the modrm byte selects a GPR */
    X86_TYPE_H, /* For AVX, VEX.vvvv selects an XMM/YMM register */
    X86_TYPE_I, /* Immediate */
    X86_TYPE_J, /* Relative offset for a jump */
    X86_TYPE_L, /* The upper 4 bits of the immediate select a 128-bit register */
    X86_TYPE_M, /* modrm byte selects a memory operand */
    X86_TYPE_N, /* R/M in the modrm byte selects an MMX register */
    X86_TYPE_O, /* Absolute address encoded in the instruction */
    X86_TYPE_P, /* reg in the modrm byte selects an MMX register */
    X86_TYPE_Q, /* MMX modrm operand */
    X86_TYPE_R, /* R/M in the modrm byte selects a register */
    X86_TYPE_S, /* reg selects a segment register */
    X86_TYPE_U, /* R/M in the modrm byte selects an XMM/YMM register */
    X86_TYPE_V, /* reg in the modrm byte selects an XMM/YMM register */
    X86_TYPE_W, /* XMM/YMM modrm operand */
    X86_TYPE_X, /* string source */
    X86_TYPE_Y, /* string destination */

    /* Custom */
    X86_TYPE_EM, /* modrm byte selects an ALU memory operand */
    X86_TYPE_WM, /* modrm byte selects an XMM/YMM memory operand */
    X86_TYPE_I_unsigned, /* Immediate, zero-extended */
    X86_TYPE_nop, /* modrm operand decoded but not loaded into s->T{0,1} */
    X86_TYPE_2op, /* 2-operand RMW instruction */
    X86_TYPE_LoBits, /* encoded in bits 0-2 of the operand + REX.B */
    X86_TYPE_0, /* Hard-coded GPRs (RAX..RDI) */
    X86_TYPE_1,
    X86_TYPE_2,
    X86_TYPE_3,
    X86_TYPE_4,
    X86_TYPE_5,
    X86_TYPE_6,
    X86_TYPE_7,
    X86_TYPE_ES, /* Hard-coded segment registers */
    X86_TYPE_CS,
    X86_TYPE_SS,
    X86_TYPE_DS,
    X86_TYPE_FS,
    X86_TYPE_GS,
} X86OpType;

/*
 * Operand sizes; single letters follow the Intel SDM appendix A
 * conventions, multi-character names are QEMU extensions.
 */
typedef enum X86OpSize {
    X86_SIZE_None,

    X86_SIZE_a,  /* BOUND operand */
    X86_SIZE_b,  /* byte */
    X86_SIZE_d,  /* 32-bit */
    X86_SIZE_dq, /* SSE/AVX 128-bit */
    X86_SIZE_p,  /* Far pointer */
    X86_SIZE_pd, /* SSE/AVX packed double precision */
    X86_SIZE_pi, /* MMX */
    X86_SIZE_ps, /* SSE/AVX packed single precision */
    X86_SIZE_q,  /* 64-bit */
    X86_SIZE_qq, /* AVX 256-bit */
    X86_SIZE_s,  /* Descriptor */
    X86_SIZE_sd, /* SSE/AVX scalar double precision */
    X86_SIZE_ss, /* SSE/AVX scalar single precision */
    X86_SIZE_si, /* 32-bit GPR */
    X86_SIZE_v,  /* 16/32/64-bit, based on operand size */
    X86_SIZE_w,  /* 16-bit */
    X86_SIZE_x,  /* 128/256-bit, based on operand size */
    X86_SIZE_y,  /* 32/64-bit, based on operand size */
    X86_SIZE_z,  /* 16-bit for 16-bit operand size, else 32-bit */
    X86_SIZE_z_f64,  /* 32-bit for 32-bit operand size or 64-bit mode, else 16-bit */

    /* Custom */
    X86_SIZE_d64,
    X86_SIZE_f64,
    X86_SIZE_xh, /* SSE/AVX packed half register */
} X86OpSize;

/* CPUID feature bit that gates each decode-table entry. */
typedef enum X86CPUIDFeature {
    X86_FEAT_None,
    X86_FEAT_3DNOW,
    X86_FEAT_ADX,
    X86_FEAT_AES,
    X86_FEAT_AVX,
    X86_FEAT_AVX2,
    X86_FEAT_BMI1,
    X86_FEAT_BMI2,
    X86_FEAT_CMOV,
    X86_FEAT_CMPCCXADD,
    X86_FEAT_F16C,
    X86_FEAT_FMA,
    X86_FEAT_MOVBE,
    X86_FEAT_PCLMULQDQ,
    X86_FEAT_SHA_NI,
    X86_FEAT_SSE,
    X86_FEAT_SSE2,
    X86_FEAT_SSE3,
    X86_FEAT_SSSE3,
    X86_FEAT_SSE41,
    X86_FEAT_SSE42,
    X86_FEAT_SSE4A,
} X86CPUIDFeature;

/* Execution flags */

/* How a decoded operand is loaded/stored by the generated code. */
typedef enum X86OpUnit {
    X86_OP_SKIP,    /* not valid or managed by emission function */
    X86_OP_SEG,     /* segment selector */
    X86_OP_CR,      /* control register */
    X86_OP_DR,      /* debug register */
    X86_OP_INT,     /* loaded into/stored from s->T0/T1 */
    X86_OP_IMM,     /* immediate */
    X86_OP_SSE,     /* address in either s->ptrX or s->A0 depending on has_ea */
    X86_OP_MMX,     /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;

/* Bitmask of validity checks performed before emitting the instruction. */
typedef enum X86InsnCheck {
    /* Illegal or exclusive to 64-bit mode */
    X86_CHECK_i64 = 1,
    X86_CHECK_o64 = 2,

    /* Fault outside protected mode */
    X86_CHECK_prot = 4,

    /* Privileged instruction checks */
    X86_CHECK_cpl0 = 8,
    X86_CHECK_vm86_iopl = 16,
    X86_CHECK_cpl_iopl = 32,
    X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,

    /* Fault if VEX.L=1 */
    X86_CHECK_VEX128 = 64,

    /* Fault if VEX.W=1 */
    X86_CHECK_W0 = 128,

    /* Fault if VEX.W=0 */
    X86_CHECK_W1 = 256,
} X86InsnCheck;

/* Per-instruction quirks that adjust operand decoding or code generation. */
typedef enum X86InsnSpecial {
    X86_SPECIAL_None,

    /* Accepts LOCK prefix; LOCKed operations do not load or writeback operand 0 */
    X86_SPECIAL_HasLock,

    /* Always locked if it has a memory operand (XCHG) */
    X86_SPECIAL_Locked,

    /* Do not apply segment base to effective address */
    X86_SPECIAL_NoSeg,

    /*
     * Rd/Mb or Rd/Mw in the manual: register operand 0 is treated as 32 bits
     * (and writeback zero-extends it to 64 bits if applicable).  PREFIX_DATA
     * does not trigger 16-bit writeback and, as a side effect, high-byte
     * registers are never used.
     */
    X86_SPECIAL_Op0_Rd,

    /*
     * Ry/Mb in the manual (PINSRB).  However, the high bits are never used by
     * the instruction in either the register or memory cases; the *real* effect
     * of this modifier is that high-byte registers are never used, even without
     * a REX prefix.  Therefore, PINSRW does not need it despite having Ry/Mw.
     */
    X86_SPECIAL_Op2_Ry,

    /*
     * Register operand 2 is extended to full width, while a memory operand
     * is doubled in size if VEX.L=1.
     */
    X86_SPECIAL_AVXExtMov,

    /*
     * MMX instruction exists with no prefix; if there is no prefix, V/H/W/U operands
     * become P/P/Q/N, and size "x" becomes "q".
     */
    X86_SPECIAL_MMX,

    /* When loaded into s->T0, register operand 1 is zero/sign extended.  */
    X86_SPECIAL_SExtT0,
    X86_SPECIAL_ZExtT0,

    /* Memory operand size of MOV from segment register is MO_16 */
    X86_SPECIAL_Op0_Mw,
} X86InsnSpecial;

/*
 * Special cases for instructions that operate on XMM/YMM registers.  Intel
 * retconned all of them to have VEX exception classes other than 0 and 13, so
 * all these only matter for instructions that have a VEX exception class.
 * Based on tables in the "AVX and SSE Instruction Exception Specification"
 * section of the manual.
 */
typedef enum X86VEXSpecial {
    /* Legacy SSE instructions that allow unaligned operands */
    X86_VEX_SSEUnaligned,

    /*
     * Used for instructions that distinguish the XMM operand type with an
     * instruction prefix; legacy SSE encodings will allow unaligned operands
     * for scalar operands only (identified by a REP prefix).  In this case,
     * the decoding table uses "x" for the vector operands instead of specifying
     * pd/ps/sd/ss individually.
     */
    X86_VEX_REPScalar,

    /*
     * VEX instructions that only support 256-bit operands with AVX2 (Table 2-17
     * column 3).  Columns 2 and 4 (instructions limited to 256- and 128-bit
     * operands respectively) are implicit in the presence of dq and qq
     * operands, and thus handled by decode_op_size.
     */
    X86_VEX_AVX2_256,
} X86VEXSpecial;


typedef struct X86OpEntry  X86OpEntry;
typedef struct X86DecodedInsn X86DecodedInsn;

/* Decode function for multibyte opcodes.  */
typedef void (*X86DecodeFunc)(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b);

/* Code generation function.  */
typedef void (*X86GenFunc)(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode);

/* One entry of the decode table: operands, gating feature, and checks. */
struct X86OpEntry {
    /* Based on the is_decode flags.  */
    union {
        X86GenFunc  gen;
        X86DecodeFunc decode;
    };
    /* op0 is always written, op1 and op2 are always read.  */
    X86OpType    op0:8;
    X86OpSize    s0:8;
    X86OpType    op1:8;
    X86OpSize    s1:8;
    X86OpType    op2:8;
    X86OpSize    s2:8;
    /* Must be I and b respectively if present.  */
    X86OpType    op3:8;
    X86OpSize    s3:8;

    X86InsnSpecial special:8;
    X86CPUIDFeature cpuid:8;
    unsigned     vex_class:8;
    X86VEXSpecial vex_special:8;
    unsigned     valid_prefix:16;
    unsigned     check:16;
    unsigned     intercept:8;
    bool         is_decode:1;
};

/* A single decoded operand. */
typedef struct X86DecodedOp {
    int8_t n;
    MemOp ot;     /* For b/c/d/p/s/q/v/w/y/z */
    X86OpUnit unit;
    bool has_ea;
    int offset;   /* For MMX and SSE */

    union {
        target_ulong imm;
        /*
         * This field is used internally by macros OP0_PTR/OP1_PTR/OP2_PTR,
         * do not access directly!
         */
        TCGv_ptr v_ptr;
    };
} X86DecodedOp;

/* A fully decoded instruction, passed to the X86GenFunc. */
struct X86DecodedInsn {
    X86OpEntry e;
    X86DecodedOp op[3];
    /*
     * Rightmost immediate, for convenience since most instructions have
     * one (and also for 4-operand instructions).
     */
    target_ulong immediate;
    AddressParts mem;

    TCGv cc_dst, cc_src, cc_src2;
    TCGv_i32 cc_op_dynamic;
    int8_t cc_op;

    uint8_t b;
};
