/*
 * Decode table flags, mostly based on Intel SDM.
 *
 * Copyright (c) 2022 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

typedef enum X86OpType {
    X86_TYPE_None,

    X86_TYPE_A, /* Implicit */
    X86_TYPE_B, /* VEX.vvvv selects a GPR */
    X86_TYPE_C, /* REG in the modrm byte selects a control register */
    X86_TYPE_D, /* REG in the modrm byte selects a debug register */
    X86_TYPE_E, /* ALU modrm operand */
    X86_TYPE_F, /* EFLAGS/RFLAGS */
    X86_TYPE_G, /* REG in the modrm byte selects a GPR */
    X86_TYPE_H, /* For AVX, VEX.vvvv selects an XMM/YMM register */
    X86_TYPE_I, /* Immediate */
    X86_TYPE_J, /* Relative offset for a jump */
    X86_TYPE_L, /* The upper 4 bits of the immediate select a 128-bit register */
    X86_TYPE_M, /* modrm byte selects a memory operand */
    X86_TYPE_N, /* R/M in the modrm byte selects an MMX register */
    X86_TYPE_O, /* Absolute address encoded in the instruction */
    X86_TYPE_P, /* reg in the modrm byte selects an MMX register */
    X86_TYPE_Q, /* MMX modrm operand */
    X86_TYPE_R, /* R/M in the modrm byte selects a register */
    X86_TYPE_S, /* reg selects a segment register */
    X86_TYPE_U, /* R/M in the modrm byte selects an XMM/YMM register */
    X86_TYPE_V, /* reg in the modrm byte selects an XMM/YMM register */
    X86_TYPE_W, /* XMM/YMM modrm operand */
    X86_TYPE_X, /* string source */
    X86_TYPE_Y, /* string destination */

    /* Custom */
    X86_TYPE_EM, /* modrm byte selects an ALU memory operand */
    X86_TYPE_WM, /* modrm byte selects an XMM/YMM memory operand */
    X86_TYPE_I_unsigned, /* Immediate, zero-extended */
    X86_TYPE_nop, /* modrm operand decoded but not loaded into s->T{0,1} */
    X86_TYPE_2op, /* 2-operand RMW instruction */
    X86_TYPE_LoBits, /* encoded in bits 0-2 of the operand + REX.B */
    X86_TYPE_0, /* Hard-coded GPRs (RAX..RDI) */
    X86_TYPE_1,
    X86_TYPE_2,
    X86_TYPE_3,
    X86_TYPE_4,
    X86_TYPE_5,
    X86_TYPE_6,
    X86_TYPE_7,
    X86_TYPE_ES, /* Hard-coded segment registers */
    X86_TYPE_CS,
    X86_TYPE_SS,
    X86_TYPE_DS,
    X86_TYPE_FS,
    X86_TYPE_GS,
} X86OpType;

typedef enum X86OpSize {
    X86_SIZE_None,

    X86_SIZE_a,  /* BOUND operand */
    X86_SIZE_b,  /* byte */
    X86_SIZE_d,  /* 32-bit */
    X86_SIZE_dq, /* SSE/AVX 128-bit */
    X86_SIZE_p,  /* Far pointer */
    X86_SIZE_pd, /* SSE/AVX packed double precision */
    X86_SIZE_pi, /* MMX */
    X86_SIZE_ps, /* SSE/AVX packed single precision */
    X86_SIZE_q,  /* 64-bit */
    X86_SIZE_qq, /* AVX 256-bit */
    X86_SIZE_s,  /* Descriptor */
    X86_SIZE_sd, /* SSE/AVX scalar double precision */
    X86_SIZE_ss, /* SSE/AVX scalar single precision */
    X86_SIZE_si, /* 32-bit GPR */
    X86_SIZE_v,  /* 16/32/64-bit, based on operand size */
    X86_SIZE_w,  /* 16-bit */
    X86_SIZE_x,  /* 128/256-bit, based on operand size */
    X86_SIZE_y,  /* 32/64-bit, based on operand size */
    X86_SIZE_y_d64, /* 32/64-bit, based on 64-bit mode */
    X86_SIZE_z,  /* 16-bit for 16-bit operand size, else 32-bit */
    X86_SIZE_z_f64, /* 32-bit for 32-bit operand size or 64-bit mode, else 16-bit */

    /* Custom */
    X86_SIZE_d64,
    X86_SIZE_f64,
    X86_SIZE_xh, /* SSE/AVX packed half register */
} X86OpSize;
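
/*
 * Illustrative note (a sketch, not part of the decode tables themselves):
 * a type letter and a size letter combine into the two-character operand
 * codes used by the Intel SDM opcode maps. For example:
 *
 *     Ev -> op = X86_TYPE_E, size = X86_SIZE_v  (ALU modrm operand, 16/32/64-bit)
 *     Gb -> op = X86_TYPE_G, size = X86_SIZE_b  (GPR from modrm.reg, byte)
 *     Ib -> op = X86_TYPE_I, size = X86_SIZE_b  (byte immediate)
 */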

typedef enum X86CPUIDFeature {
    X86_FEAT_None,
    X86_FEAT_3DNOW,
    X86_FEAT_ADX,
    X86_FEAT_AES,
    X86_FEAT_AVX,
    X86_FEAT_AVX2,
    X86_FEAT_BMI1,
    X86_FEAT_BMI2,
    X86_FEAT_CLFLUSH,
    X86_FEAT_CLFLUSHOPT,
    X86_FEAT_CLWB,
    X86_FEAT_CMOV,
    X86_FEAT_CMPCCXADD,
    X86_FEAT_F16C,
    X86_FEAT_FMA,
    X86_FEAT_FSGSBASE,
    X86_FEAT_FXSR,
    X86_FEAT_MOVBE,
    X86_FEAT_PCLMULQDQ,
    X86_FEAT_POPCNT,
    X86_FEAT_SHA_NI,
    X86_FEAT_SSE,
    X86_FEAT_SSE2,
    X86_FEAT_SSE3,
    X86_FEAT_SSSE3,
    X86_FEAT_SSE41,
    X86_FEAT_SSE42,
    X86_FEAT_SSE4A,
    X86_FEAT_XSAVE,
    X86_FEAT_XSAVEOPT,
} X86CPUIDFeature;

/* Execution flags */

typedef enum X86OpUnit {
    X86_OP_SKIP, /* not valid or managed by emission function */
    X86_OP_SEG,  /* segment selector */
    X86_OP_CR,   /* control register */
    X86_OP_DR,   /* debug register */
    X86_OP_INT,  /* loaded into/stored from s->T0/T1 */
    X86_OP_IMM,  /* immediate */
    X86_OP_SSE,  /* address in either s->ptrX or s->A0 depending on has_ea */
    X86_OP_MMX,  /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;

typedef enum X86InsnCheck {
    /* Illegal or exclusive to 64-bit mode */
    X86_CHECK_i64 = 1,
    X86_CHECK_o64 = 2,

    /* Fault in vm86 mode */
    X86_CHECK_no_vm86 = 4,

    /* Privileged instruction checks */
    X86_CHECK_cpl0 = 8,
    X86_CHECK_vm86_iopl = 16,
    X86_CHECK_cpl_iopl = 32,
    X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,

    /* Fault if VEX.L=1 */
    X86_CHECK_VEX128 = 64,

    /* Fault if VEX.W=1 */
    X86_CHECK_W0 = 128,

    /* Fault if VEX.W=0 */
    X86_CHECK_W1 = 256,

    /* Fault outside protected mode, possibly including vm86 mode */
    X86_CHECK_prot_or_vm86 = 512,
    X86_CHECK_prot = X86_CHECK_prot_or_vm86 | X86_CHECK_no_vm86,

    /* Fault outside SMM */
    X86_CHECK_smm = 1024,

    /* Vendor-specific checks for Intel/AMD differences */
    X86_CHECK_i64_amd = 2048,
    X86_CHECK_o64_intel = 4096,
} X86InsnCheck;
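
/*
 * The check constants form a bitmask and can be OR-ed together in an entry's
 * "check" field; for example, a hypothetical instruction that is valid only
 * in 64-bit mode and only at CPL 0 would use X86_CHECK_o64 | X86_CHECK_cpl0.
 */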

typedef enum X86InsnSpecial {
    X86_SPECIAL_None,

    /* Accepts LOCK prefix; LOCKed operations do not load or writeback operand 0 */
    X86_SPECIAL_HasLock,

    /* Always locked if it has a memory operand (XCHG) */
    X86_SPECIAL_Locked,

    /* Do not load effective address in s->A0 */
    X86_SPECIAL_NoLoadEA,

    /*
     * Rd/Mb or Rd/Mw in the manual: register operand 0 is treated as 32 bits
     * (and writeback zero-extends it to 64 bits if applicable). PREFIX_DATA
     * does not trigger 16-bit writeback and, as a side effect, high-byte
     * registers are never used.
     */
    X86_SPECIAL_Op0_Rd,

    /*
     * Ry/Mb in the manual (PINSRB). However, the high bits are never used by
     * the instruction in either the register or memory cases; the *real* effect
     * of this modifier is that high-byte registers are never used, even without
     * a REX prefix. Therefore, PINSRW does not need it despite having Ry/Mw.
     */
    X86_SPECIAL_Op2_Ry,

    /*
     * Register operand 2 is extended to full width, while a memory operand
     * is doubled in size if VEX.L=1.
     */
    X86_SPECIAL_AVXExtMov,

    /*
     * MMX instruction exists with no prefix; if there is no prefix, V/H/W/U
     * operands become P/P/Q/N, and size "x" becomes "q".
     */
    X86_SPECIAL_MMX,

    /* When loaded into s->T0, register operand 1 is zero/sign extended. */
    X86_SPECIAL_SExtT0,
    X86_SPECIAL_ZExtT0,

    /* Memory operand size of MOV from segment register is MO_16 */
    X86_SPECIAL_Op0_Mw,
} X86InsnSpecial;

/*
 * Special cases for instructions that operate on XMM/YMM registers. Intel
 * retconned all of them to have VEX exception classes other than 0 and 13, so
 * all these only matter for instructions that have a VEX exception class.
 * Based on tables in the "AVX and SSE Instruction Exception Specification"
 * section of the manual.
 */
typedef enum X86VEXSpecial {
    /* Legacy SSE instructions that allow unaligned operands */
    X86_VEX_SSEUnaligned,

    /*
     * Used for instructions that distinguish the XMM operand type with an
     * instruction prefix; legacy SSE encodings will allow unaligned operands
     * for scalar operands only (identified by a REP prefix). In this case,
     * the decoding table uses "x" for the vector operands instead of specifying
     * pd/ps/sd/ss individually.
     */
    X86_VEX_REPScalar,

    /*
     * VEX instructions that only support 256-bit operands with AVX2 (Table 2-17
     * column 3). Columns 2 and 4 (instructions limited to 256- and 128-bit
     * operands respectively) are implicit in the presence of dq and qq
     * operands, and thus handled by decode_op_size.
     */
    X86_VEX_AVX2_256,
} X86VEXSpecial;


typedef struct X86OpEntry X86OpEntry;
typedef struct X86DecodedInsn X86DecodedInsn;

/* Decode function for multibyte opcodes. */
typedef void (*X86DecodeFunc)(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b);

/* Code generation function. */
typedef void (*X86GenFunc)(DisasContext *s, X86DecodedInsn *decode);

struct X86OpEntry {
    /* Based on the is_decode flags. */
    union {
        X86GenFunc gen;
        X86DecodeFunc decode;
    };
    /* op0 is always written, op1 and op2 are always read. */
    X86OpType op0:8;
    X86OpSize s0:8;
    X86OpType op1:8;
    X86OpSize s1:8;
    X86OpType op2:8;
    X86OpSize s2:8;
    /* Must be I and b respectively if present. */
    X86OpType op3:8;
    X86OpSize s3:8;

    X86InsnSpecial special:8;
    X86CPUIDFeature cpuid:8;
    unsigned vex_class:8;
    X86VEXSpecial vex_special:8;
    unsigned valid_prefix:16;
    unsigned check:16;
    unsigned intercept:8;
    bool has_intercept:1;
    bool is_decode:1;
};
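
/*
 * A minimal sketch of how an entry could be filled in for an "ADD Ev, Gv"
 * style encoding, assuming a gen_ADD emitter defined elsewhere (the real
 * decode tables are built with helper macros rather than written out like
 * this, and the entry name below is purely illustrative):
 *
 *     static const X86OpEntry example_add_Ev_Gv = {
 *         .gen = gen_ADD,
 *         .op0 = X86_TYPE_E,   .s0 = X86_SIZE_v,   destination, read/written
 *         .op1 = X86_TYPE_2op, .s1 = X86_SIZE_v,   2-operand RMW: same as op0
 *         .op2 = X86_TYPE_G,   .s2 = X86_SIZE_v,   source GPR from modrm.reg
 *         .special = X86_SPECIAL_HasLock,          ADD accepts a LOCK prefix
 *     };
 */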
322 */ 323 target_ulong immediate; 324 AddressParts mem; 325 326 TCGv cc_dst, cc_src, cc_src2; 327 TCGv_i32 cc_op_dynamic; 328 int8_t cc_op; 329 330 uint8_t b; 331 }; 332 333