1 /* 2 * i386 virtual CPU header 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #ifndef I386_CPU_H 21 #define I386_CPU_H 22 23 #include "sysemu/tcg.h" 24 #include "cpu-qom.h" 25 #include "hyperv-proto.h" 26 #include "exec/cpu-defs.h" 27 #include "qapi/qapi-types-common.h" 28 29 /* The x86 has a strong memory model with some store-after-load re-ordering */ 30 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) 31 32 /* Maximum instruction code size */ 33 #define TARGET_MAX_INSN_SIZE 16 34 35 /* support for self modifying code even if the modified instruction is 36 close to the modifying instruction */ 37 #define TARGET_HAS_PRECISE_SMC 38 39 #ifdef TARGET_X86_64 40 #define I386_ELF_MACHINE EM_X86_64 41 #define ELF_MACHINE_UNAME "x86_64" 42 #else 43 #define I386_ELF_MACHINE EM_386 44 #define ELF_MACHINE_UNAME "i686" 45 #endif 46 47 enum { 48 R_EAX = 0, 49 R_ECX = 1, 50 R_EDX = 2, 51 R_EBX = 3, 52 R_ESP = 4, 53 R_EBP = 5, 54 R_ESI = 6, 55 R_EDI = 7, 56 R_R8 = 8, 57 R_R9 = 9, 58 R_R10 = 10, 59 R_R11 = 11, 60 R_R12 = 12, 61 R_R13 = 13, 62 R_R14 = 14, 63 R_R15 = 15, 64 65 R_AL = 0, 66 R_CL = 1, 67 R_DL = 2, 68 R_BL = 3, 69 R_AH = 4, 70 R_CH = 5, 71 R_DH = 6, 72 R_BH = 7, 73 }; 74 75 typedef enum X86Seg { 76 R_ES = 0, 77 R_CS = 1, 78 R_SS = 2, 79 R_DS = 3, 80 R_FS = 4, 81 R_GS = 5, 82 R_LDTR = 6, 83 R_TR = 7, 84 } X86Seg; 85 86 /* segment descriptor fields */ 87 #define DESC_G_SHIFT 23 88 #define DESC_G_MASK (1 << DESC_G_SHIFT) 89 #define DESC_B_SHIFT 22 90 #define DESC_B_MASK (1 << DESC_B_SHIFT) 91 #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ 92 #define DESC_L_MASK (1 << DESC_L_SHIFT) 93 #define DESC_AVL_SHIFT 20 94 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) 95 #define DESC_P_SHIFT 15 96 #define DESC_P_MASK (1 << DESC_P_SHIFT) 97 #define DESC_DPL_SHIFT 13 98 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) 99 #define DESC_S_SHIFT 12 100 #define DESC_S_MASK (1 << DESC_S_SHIFT) 101 #define DESC_TYPE_SHIFT 8 102 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) 103 #define DESC_A_MASK (1 << 8) 104 105 #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ 106 #define DESC_C_MASK (1 << 10) /* code: conforming */ 107 #define DESC_R_MASK (1 << 9) /* code: readable */ 108 109 #define DESC_E_MASK (1 << 10) /* data: expansion direction */ 110 #define DESC_W_MASK (1 << 9) /* data: writable */ 111 112 #define DESC_TSS_BUSY_MASK (1 << 9) 113 114 /* eflags masks */ 115 #define CC_C 0x0001 116 #define CC_P 0x0004 117 #define CC_A 0x0010 118 #define CC_Z 0x0040 119 #define CC_S 0x0080 120 #define CC_O 0x0800 121 122 #define TF_SHIFT 8 123 #define IOPL_SHIFT 12 124 #define VM_SHIFT 17 125 126 #define TF_MASK 0x00000100 127 #define IF_MASK 0x00000200 128 #define DF_MASK 0x00000400 129 #define IOPL_MASK 0x00003000 130 #define NT_MASK 0x00004000 131 #define RF_MASK 0x00010000 132 #define VM_MASK 0x00020000 133 #define AC_MASK 0x00040000 134 #define VIF_MASK 0x00080000 135 #define VIP_MASK 0x00100000 136 #define ID_MASK 0x00200000 137 138 /* hidden flags - used internally by qemu to represent additional cpu 139 states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We 140 avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit 141 positions to ease oring with eflags. */ 142 /* current cpl */ 143 #define HF_CPL_SHIFT 0 144 /* true if hardware interrupts must be disabled for next instruction */ 145 #define HF_INHIBIT_IRQ_SHIFT 3 146 /* 16 or 32 segments */ 147 #define HF_CS32_SHIFT 4 148 #define HF_SS32_SHIFT 5 149 /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ 150 #define HF_ADDSEG_SHIFT 6 151 /* copy of CR0.PE (protected mode) */ 152 #define HF_PE_SHIFT 7 153 #define HF_TF_SHIFT 8 /* must be same as eflags */ 154 #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ 155 #define HF_EM_SHIFT 10 156 #define HF_TS_SHIFT 11 157 #define HF_IOPL_SHIFT 12 /* must be same as eflags */ 158 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ 159 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ 160 #define HF_RF_SHIFT 16 /* must be same as eflags */ 161 #define HF_VM_SHIFT 17 /* must be same as eflags */ 162 #define HF_AC_SHIFT 18 /* must be same as eflags */ 163 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ 164 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ 165 #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ 166 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ 167 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ 168 #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ 169 #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ 170 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ 171 172 #define HF_CPL_MASK (3 << HF_CPL_SHIFT) 173 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) 174 #define HF_CS32_MASK (1 << HF_CS32_SHIFT) 175 #define HF_SS32_MASK (1 << HF_SS32_SHIFT) 176 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) 177 #define HF_PE_MASK (1 << HF_PE_SHIFT) 178 #define HF_TF_MASK (1 << HF_TF_SHIFT) 179 #define HF_MP_MASK (1 << HF_MP_SHIFT) 180 #define HF_EM_MASK (1 << HF_EM_SHIFT) 181 #define HF_TS_MASK (1 << HF_TS_SHIFT) 182 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) 183 #define HF_LMA_MASK (1 << HF_LMA_SHIFT) 184 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 185 #define HF_RF_MASK (1 << HF_RF_SHIFT) 186 #define HF_VM_MASK (1 << HF_VM_SHIFT) 187 #define HF_AC_MASK (1 << HF_AC_SHIFT) 188 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 189 #define HF_SVME_MASK (1 << HF_SVME_SHIFT) 190 #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) 191 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 192 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) 193 #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) 194 #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) 195 #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) 196 197 /* hflags2 */ 198 199 #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ 200 #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ 201 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ 202 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ 203 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ 204 #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ 205 #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ 206 #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ 207 208 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) 209 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) 210 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) 211 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) 212 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) 213 #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) 214 #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) 215 #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) 216 217 #define CR0_PE_SHIFT 0 218 #define CR0_MP_SHIFT 1 219 220 #define CR0_PE_MASK (1U << 0) 221 #define CR0_MP_MASK (1U << 1) 222 #define CR0_EM_MASK (1U << 2) 223 #define CR0_TS_MASK (1U << 3) 224 #define CR0_ET_MASK (1U << 4) 225 #define CR0_NE_MASK (1U << 5) 226 #define CR0_WP_MASK (1U << 16) 227 #define CR0_AM_MASK (1U << 18) 228 #define CR0_PG_MASK (1U << 31) 229 230 #define CR4_VME_MASK (1U << 0) 231 #define CR4_PVI_MASK (1U << 1) 232 #define CR4_TSD_MASK (1U << 2) 233 #define CR4_DE_MASK (1U << 3) 234 #define CR4_PSE_MASK (1U << 4) 235 #define CR4_PAE_MASK (1U << 5) 236 #define CR4_MCE_MASK (1U << 6) 237 #define CR4_PGE_MASK (1U << 7) 238 #define CR4_PCE_MASK (1U << 8) 239 #define CR4_OSFXSR_SHIFT 9 240 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) 241 #define CR4_OSXMMEXCPT_MASK (1U << 10) 242 #define CR4_LA57_MASK (1U << 12) 243 #define CR4_VMXE_MASK (1U << 13) 244 #define CR4_SMXE_MASK (1U << 14) 245 #define CR4_FSGSBASE_MASK (1U << 16) 246 #define CR4_PCIDE_MASK (1U << 17) 247 #define CR4_OSXSAVE_MASK (1U << 18) 248 #define CR4_SMEP_MASK (1U << 20) 249 #define CR4_SMAP_MASK (1U << 21) 250 #define CR4_PKE_MASK (1U << 22) 251 252 #define DR6_BD (1 << 13) 253 #define DR6_BS (1 << 14) 254 #define DR6_BT (1 << 15) 255 #define DR6_FIXED_1 0xffff0ff0 256 257 #define DR7_GD (1 << 13) 258 #define DR7_TYPE_SHIFT 16 259 #define DR7_LEN_SHIFT 18 260 #define DR7_FIXED_1 0x00000400 261 #define DR7_GLOBAL_BP_MASK 0xaa 262 #define DR7_LOCAL_BP_MASK 0x55 263 #define DR7_MAX_BP 4 264 #define DR7_TYPE_BP_INST 0x0 265 #define DR7_TYPE_DATA_WR 0x1 266 #define DR7_TYPE_IO_RW 0x2 267 #define DR7_TYPE_DATA_RW 0x3 268 269 #define PG_PRESENT_BIT 0 270 #define PG_RW_BIT 1 271 #define PG_USER_BIT 2 272 #define PG_PWT_BIT 3 273 #define PG_PCD_BIT 4 274 #define PG_ACCESSED_BIT 5 275 #define PG_DIRTY_BIT 6 276 #define PG_PSE_BIT 7 277 #define PG_GLOBAL_BIT 8 278 #define PG_PSE_PAT_BIT 12 279 #define PG_PKRU_BIT 59 280 #define PG_NX_BIT 63 281 282 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) 283 #define PG_RW_MASK (1 << PG_RW_BIT) 284 #define PG_USER_MASK (1 << PG_USER_BIT) 285 #define PG_PWT_MASK (1 << PG_PWT_BIT) 286 #define PG_PCD_MASK (1 << PG_PCD_BIT) 287 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) 288 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) 289 #define PG_PSE_MASK (1 << PG_PSE_BIT) 290 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) 291 #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) 292 #define PG_ADDRESS_MASK 0x000ffffffffff000LL 293 #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) 294 #define PG_HI_USER_MASK 0x7ff0000000000000LL 295 #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) 296 #define PG_NX_MASK (1ULL << PG_NX_BIT) 297 298 #define PG_ERROR_W_BIT 1 299 300 #define PG_ERROR_P_MASK 0x01 301 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) 302 #define PG_ERROR_U_MASK 0x04 303 #define PG_ERROR_RSVD_MASK 0x08 304 #define PG_ERROR_I_D_MASK 0x10 305 #define PG_ERROR_PK_MASK 0x20 306 307 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ 308 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ 309 #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ 310 311 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) 312 #define MCE_BANKS_DEF 10 313 314 #define MCG_CAP_BANKS_MASK 0xff 315 316 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ 317 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ 318 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 319 #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ 320 321 #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ 322 323 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 324 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ 325 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ 326 #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ 327 #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ 328 #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ 329 #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ 330 #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 331 #define MCI_STATUS_AR (1ULL<<55) /* Action required */ 332 333 /* MISC register defines */ 334 #define MCM_ADDR_SEGOFF 0 /* segment offset */ 335 #define MCM_ADDR_LINEAR 1 /* linear address */ 336 #define MCM_ADDR_PHYS 2 /* physical address */ 337 #define MCM_ADDR_MEM 3 /* memory address */ 338 #define MCM_ADDR_GENERIC 7 /* generic */ 339 340 #define MSR_IA32_TSC 0x10 341 #define MSR_IA32_APICBASE 0x1b 342 #define MSR_IA32_APICBASE_BSP (1<<8) 343 #define MSR_IA32_APICBASE_ENABLE (1<<11) 344 #define MSR_IA32_APICBASE_EXTD (1 << 10) 345 #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) 346 #define MSR_IA32_FEATURE_CONTROL 0x0000003a 347 #define MSR_TSC_ADJUST 0x0000003b 348 #define MSR_IA32_SPEC_CTRL 0x48 349 #define MSR_VIRT_SSBD 0xc001011f 350 #define MSR_IA32_PRED_CMD 0x49 351 #define MSR_IA32_CORE_CAPABILITY 0xcf 352 353 #define MSR_IA32_ARCH_CAPABILITIES 0x10a 354 #define ARCH_CAP_TSX_CTRL_MSR (1<<7) 355 356 #define MSR_IA32_TSX_CTRL 0x122 357 #define MSR_IA32_TSCDEADLINE 0x6e0 358 359 #define FEATURE_CONTROL_LOCKED (1<<0) 360 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) 361 #define FEATURE_CONTROL_LMCE (1<<20) 362 363 #define MSR_P6_PERFCTR0 0xc1 364 365 #define MSR_IA32_SMBASE 0x9e 366 #define MSR_SMI_COUNT 0x34 367 #define MSR_MTRRcap 0xfe 368 #define MSR_MTRRcap_VCNT 8 369 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) 370 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) 371 372 #define MSR_IA32_SYSENTER_CS 0x174 373 #define MSR_IA32_SYSENTER_ESP 0x175 374 #define MSR_IA32_SYSENTER_EIP 0x176 375 376 #define MSR_MCG_CAP 0x179 377 #define MSR_MCG_STATUS 0x17a 378 #define MSR_MCG_CTL 0x17b 379 #define MSR_MCG_EXT_CTL 0x4d0 380 381 #define MSR_P6_EVNTSEL0 0x186 382 383 #define MSR_IA32_PERF_STATUS 0x198 384 385 #define MSR_IA32_MISC_ENABLE 0x1a0 386 /* Indicates good rep/movs microcode on some processors: */ 387 #define MSR_IA32_MISC_ENABLE_DEFAULT 1 388 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) 389 390 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) 391 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) 392 393 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) 394 395 #define MSR_MTRRfix64K_00000 0x250 396 #define MSR_MTRRfix16K_80000 0x258 397 #define MSR_MTRRfix16K_A0000 0x259 398 #define MSR_MTRRfix4K_C0000 0x268 399 #define MSR_MTRRfix4K_C8000 0x269 400 #define MSR_MTRRfix4K_D0000 0x26a 401 #define MSR_MTRRfix4K_D8000 0x26b 402 #define MSR_MTRRfix4K_E0000 0x26c 403 #define MSR_MTRRfix4K_E8000 0x26d 404 #define MSR_MTRRfix4K_F0000 0x26e 405 #define MSR_MTRRfix4K_F8000 0x26f 406 407 #define MSR_PAT 0x277 408 409 #define MSR_MTRRdefType 0x2ff 410 411 #define MSR_CORE_PERF_FIXED_CTR0 0x309 412 #define MSR_CORE_PERF_FIXED_CTR1 0x30a 413 #define MSR_CORE_PERF_FIXED_CTR2 0x30b 414 #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d 415 #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e 416 #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f 417 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 418 419 #define MSR_MC0_CTL 0x400 420 #define MSR_MC0_STATUS 0x401 421 #define MSR_MC0_ADDR 0x402 422 #define MSR_MC0_MISC 0x403 423 424 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560 425 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561 426 #define MSR_IA32_RTIT_CTL 0x570 427 #define MSR_IA32_RTIT_STATUS 0x571 428 #define MSR_IA32_RTIT_CR3_MATCH 0x572 429 #define MSR_IA32_RTIT_ADDR0_A 0x580 430 #define MSR_IA32_RTIT_ADDR0_B 0x581 431 #define MSR_IA32_RTIT_ADDR1_A 0x582 432 #define MSR_IA32_RTIT_ADDR1_B 0x583 433 #define MSR_IA32_RTIT_ADDR2_A 0x584 434 #define MSR_IA32_RTIT_ADDR2_B 0x585 435 #define MSR_IA32_RTIT_ADDR3_A 0x586 436 #define MSR_IA32_RTIT_ADDR3_B 0x587 437 #define MAX_RTIT_ADDRS 8 438 439 #define MSR_EFER 0xc0000080 440 441 #define MSR_EFER_SCE (1 << 0) 442 #define MSR_EFER_LME (1 << 8) 443 #define MSR_EFER_LMA (1 << 10) 444 #define MSR_EFER_NXE (1 << 11) 445 #define MSR_EFER_SVME (1 << 12) 446 #define MSR_EFER_FFXSR (1 << 14) 447 448 #define MSR_STAR 0xc0000081 449 #define MSR_LSTAR 0xc0000082 450 #define MSR_CSTAR 0xc0000083 451 #define MSR_FMASK 0xc0000084 452 #define MSR_FSBASE 0xc0000100 453 #define MSR_GSBASE 0xc0000101 454 #define MSR_KERNELGSBASE 0xc0000102 455 #define MSR_TSC_AUX 0xc0000103 456 457 #define MSR_VM_HSAVE_PA 0xc0010117 458 459 #define MSR_IA32_BNDCFGS 0x00000d90 460 #define MSR_IA32_XSS 0x00000da0 461 #define MSR_IA32_UMWAIT_CONTROL 0xe1 462 463 #define MSR_IA32_VMX_BASIC 0x00000480 464 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 465 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 466 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 467 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 468 #define MSR_IA32_VMX_MISC 0x00000485 469 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 470 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 471 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 472 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 473 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a 474 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b 475 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c 476 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d 477 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e 478 #define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f 479 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 480 #define MSR_IA32_VMX_VMFUNC 0x00000491 481 482 #define XSTATE_FP_BIT 0 483 #define XSTATE_SSE_BIT 1 484 #define XSTATE_YMM_BIT 2 485 #define XSTATE_BNDREGS_BIT 3 486 #define XSTATE_BNDCSR_BIT 4 487 #define XSTATE_OPMASK_BIT 5 488 #define XSTATE_ZMM_Hi256_BIT 6 489 #define XSTATE_Hi16_ZMM_BIT 7 490 #define XSTATE_PKRU_BIT 9 491 492 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) 493 #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) 494 #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) 495 #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) 496 #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) 497 #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) 498 #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) 499 #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) 500 #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) 501 502 /* CPUID feature words */ 503 typedef enum FeatureWord { 504 FEAT_1_EDX, /* CPUID[1].EDX */ 505 FEAT_1_ECX, /* CPUID[1].ECX */ 506 FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ 507 FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ 508 FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ 509 FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ 510 FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ 511 FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ 512 FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ 513 FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ 514 FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ 515 FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ 516 FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ 517 FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */ 518 FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */ 519 FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */ 520 FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */ 521 FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */ 522 FEAT_SVM, /* CPUID[8000_000A].EDX */ 523 FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ 524 FEAT_6_EAX, /* CPUID[6].EAX */ 525 FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ 526 FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ 527 FEAT_ARCH_CAPABILITIES, 528 FEAT_CORE_CAPABILITY, 529 FEAT_VMX_PROCBASED_CTLS, 530 FEAT_VMX_SECONDARY_CTLS, 531 FEAT_VMX_PINBASED_CTLS, 532 FEAT_VMX_EXIT_CTLS, 533 FEAT_VMX_ENTRY_CTLS, 534 FEAT_VMX_MISC, 535 FEAT_VMX_EPT_VPID_CAPS, 536 FEAT_VMX_BASIC, 537 FEAT_VMX_VMFUNC, 538 FEATURE_WORDS, 539 } FeatureWord; 540 541 typedef uint64_t FeatureWordArray[FEATURE_WORDS]; 542 543 /* cpuid_features bits */ 544 #define CPUID_FP87 (1U << 0) 545 #define CPUID_VME (1U << 1) 546 #define CPUID_DE (1U << 2) 547 #define CPUID_PSE (1U << 3) 548 #define CPUID_TSC (1U << 4) 549 #define CPUID_MSR (1U << 5) 550 #define CPUID_PAE (1U << 6) 551 #define CPUID_MCE (1U << 7) 552 #define CPUID_CX8 (1U << 8) 553 #define CPUID_APIC (1U << 9) 554 #define CPUID_SEP (1U << 11) /* sysenter/sysexit */ 555 #define CPUID_MTRR (1U << 12) 556 #define CPUID_PGE (1U << 13) 557 #define CPUID_MCA (1U << 14) 558 #define CPUID_CMOV (1U << 15) 559 #define CPUID_PAT (1U << 16) 560 #define CPUID_PSE36 (1U << 17) 561 #define CPUID_PN (1U << 18) 562 #define CPUID_CLFLUSH (1U << 19) 563 #define CPUID_DTS (1U << 21) 564 #define CPUID_ACPI (1U << 22) 565 #define CPUID_MMX (1U << 23) 566 #define CPUID_FXSR (1U << 24) 567 #define CPUID_SSE (1U << 25) 568 #define CPUID_SSE2 (1U << 26) 569 #define CPUID_SS (1U << 27) 570 #define CPUID_HT (1U << 28) 571 #define CPUID_TM (1U << 29) 572 #define CPUID_IA64 (1U << 30) 573 #define CPUID_PBE (1U << 31) 574 575 #define CPUID_EXT_SSE3 (1U << 0) 576 #define CPUID_EXT_PCLMULQDQ (1U << 1) 577 #define CPUID_EXT_DTES64 (1U << 2) 578 #define CPUID_EXT_MONITOR (1U << 3) 579 #define CPUID_EXT_DSCPL (1U << 4) 580 #define CPUID_EXT_VMX (1U << 5) 581 #define CPUID_EXT_SMX (1U << 6) 582 #define CPUID_EXT_EST (1U << 7) 583 #define CPUID_EXT_TM2 (1U << 8) 584 #define CPUID_EXT_SSSE3 (1U << 9) 585 #define CPUID_EXT_CID (1U << 10) 586 #define CPUID_EXT_FMA (1U << 12) 587 #define CPUID_EXT_CX16 (1U << 13) 588 #define CPUID_EXT_XTPR (1U << 14) 589 #define CPUID_EXT_PDCM (1U << 15) 590 #define CPUID_EXT_PCID (1U << 17) 591 #define CPUID_EXT_DCA (1U << 18) 592 #define CPUID_EXT_SSE41 (1U << 19) 593 #define CPUID_EXT_SSE42 (1U << 20) 594 #define CPUID_EXT_X2APIC (1U << 21) 595 #define CPUID_EXT_MOVBE (1U << 22) 596 #define CPUID_EXT_POPCNT (1U << 23) 597 #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) 598 #define CPUID_EXT_AES (1U << 25) 599 #define CPUID_EXT_XSAVE (1U << 26) 600 #define CPUID_EXT_OSXSAVE (1U << 27) 601 #define CPUID_EXT_AVX (1U << 28) 602 #define CPUID_EXT_F16C (1U << 29) 603 #define CPUID_EXT_RDRAND (1U << 30) 604 #define CPUID_EXT_HYPERVISOR (1U << 31) 605 606 #define CPUID_EXT2_FPU (1U << 0) 607 #define CPUID_EXT2_VME (1U << 1) 608 #define CPUID_EXT2_DE (1U << 2) 609 #define CPUID_EXT2_PSE (1U << 3) 610 #define CPUID_EXT2_TSC (1U << 4) 611 #define CPUID_EXT2_MSR (1U << 5) 612 #define CPUID_EXT2_PAE (1U << 6) 613 #define CPUID_EXT2_MCE (1U << 7) 614 #define CPUID_EXT2_CX8 (1U << 8) 615 #define CPUID_EXT2_APIC (1U << 9) 616 #define CPUID_EXT2_SYSCALL (1U << 11) 617 #define CPUID_EXT2_MTRR (1U << 12) 618 #define CPUID_EXT2_PGE (1U << 13) 619 #define CPUID_EXT2_MCA (1U << 14) 620 #define CPUID_EXT2_CMOV (1U << 15) 621 #define CPUID_EXT2_PAT (1U << 16) 622 #define CPUID_EXT2_PSE36 (1U << 17) 623 #define CPUID_EXT2_MP (1U << 19) 624 #define CPUID_EXT2_NX (1U << 20) 625 #define CPUID_EXT2_MMXEXT (1U << 22) 626 #define CPUID_EXT2_MMX (1U << 23) 627 #define CPUID_EXT2_FXSR (1U << 24) 628 #define CPUID_EXT2_FFXSR (1U << 25) 629 #define CPUID_EXT2_PDPE1GB (1U << 26) 630 #define CPUID_EXT2_RDTSCP (1U << 27) 631 #define CPUID_EXT2_LM (1U << 29) 632 #define CPUID_EXT2_3DNOWEXT (1U << 30) 633 #define CPUID_EXT2_3DNOW (1U << 31) 634 635 /* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */ 636 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ 637 CPUID_EXT2_DE | CPUID_EXT2_PSE | \ 638 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ 639 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ 640 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ 641 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ 642 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ 643 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ 644 CPUID_EXT2_MMX | CPUID_EXT2_FXSR) 645 646 #define CPUID_EXT3_LAHF_LM (1U << 0) 647 #define CPUID_EXT3_CMP_LEG (1U << 1) 648 #define CPUID_EXT3_SVM (1U << 2) 649 #define CPUID_EXT3_EXTAPIC (1U << 3) 650 #define CPUID_EXT3_CR8LEG (1U << 4) 651 #define CPUID_EXT3_ABM (1U << 5) 652 #define CPUID_EXT3_SSE4A (1U << 6) 653 #define CPUID_EXT3_MISALIGNSSE (1U << 7) 654 #define CPUID_EXT3_3DNOWPREFETCH (1U << 8) 655 #define CPUID_EXT3_OSVW (1U << 9) 656 #define CPUID_EXT3_IBS (1U << 10) 657 #define CPUID_EXT3_XOP (1U << 11) 658 #define CPUID_EXT3_SKINIT (1U << 12) 659 #define CPUID_EXT3_WDT (1U << 13) 660 #define CPUID_EXT3_LWP (1U << 15) 661 #define CPUID_EXT3_FMA4 (1U << 16) 662 #define CPUID_EXT3_TCE (1U << 17) 663 #define CPUID_EXT3_NODEID (1U << 19) 664 #define CPUID_EXT3_TBM (1U << 21) 665 #define CPUID_EXT3_TOPOEXT (1U << 22) 666 #define CPUID_EXT3_PERFCORE (1U << 23) 667 #define CPUID_EXT3_PERFNB (1U << 24) 668 669 #define CPUID_SVM_NPT (1U << 0) 670 #define CPUID_SVM_LBRV (1U << 1) 671 #define CPUID_SVM_SVMLOCK (1U << 2) 672 #define CPUID_SVM_NRIPSAVE (1U << 3) 673 #define CPUID_SVM_TSCSCALE (1U << 4) 674 #define CPUID_SVM_VMCBCLEAN (1U << 5) 675 #define CPUID_SVM_FLUSHASID (1U << 6) 676 #define CPUID_SVM_DECODEASSIST (1U << 7) 677 #define CPUID_SVM_PAUSEFILTER (1U << 10) 678 #define CPUID_SVM_PFTHRESHOLD (1U << 12) 679 680 /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ 681 #define CPUID_7_0_EBX_FSGSBASE (1U << 0) 682 /* 1st Group of Advanced Bit Manipulation Extensions */ 683 #define CPUID_7_0_EBX_BMI1 (1U << 3) 684 /* Hardware Lock Elision */ 685 #define CPUID_7_0_EBX_HLE (1U << 4) 686 /* Intel Advanced Vector Extensions 2 */ 687 #define CPUID_7_0_EBX_AVX2 (1U << 5) 688 /* Supervisor-mode Execution Prevention */ 689 #define CPUID_7_0_EBX_SMEP (1U << 7) 690 /* 2nd Group of Advanced Bit Manipulation Extensions */ 691 #define CPUID_7_0_EBX_BMI2 (1U << 8) 692 /* Enhanced REP MOVSB/STOSB */ 693 #define CPUID_7_0_EBX_ERMS (1U << 9) 694 /* Invalidate Process-Context Identifier */ 695 #define CPUID_7_0_EBX_INVPCID (1U << 10) 696 /* Restricted Transactional Memory */ 697 #define CPUID_7_0_EBX_RTM (1U << 11) 698 /* Memory Protection Extension */ 699 #define CPUID_7_0_EBX_MPX (1U << 14) 700 /* AVX-512 Foundation */ 701 #define CPUID_7_0_EBX_AVX512F (1U << 16) 702 /* AVX-512 Doubleword & Quadword Instruction */ 703 #define CPUID_7_0_EBX_AVX512DQ (1U << 17) 704 /* Read Random SEED */ 705 #define CPUID_7_0_EBX_RDSEED (1U << 18) 706 /* ADCX and ADOX instructions */ 707 #define CPUID_7_0_EBX_ADX (1U << 19) 708 /* Supervisor Mode Access Prevention */ 709 #define CPUID_7_0_EBX_SMAP (1U << 20) 710 /* AVX-512 Integer Fused Multiply Add */ 711 #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) 712 /* Persistent Commit */ 713 #define CPUID_7_0_EBX_PCOMMIT (1U << 22) 714 /* Flush a Cache Line Optimized */ 715 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) 716 /* Cache Line Write Back */ 717 #define CPUID_7_0_EBX_CLWB (1U << 24) 718 /* Intel Processor Trace */ 719 #define CPUID_7_0_EBX_INTEL_PT (1U << 25) 720 /* AVX-512 Prefetch */ 721 #define CPUID_7_0_EBX_AVX512PF (1U << 26) 722 /* AVX-512 Exponential and Reciprocal */ 723 #define CPUID_7_0_EBX_AVX512ER (1U << 27) 724 /* AVX-512 Conflict Detection */ 725 #define CPUID_7_0_EBX_AVX512CD (1U << 28) 726 /* SHA1/SHA256 Instruction Extensions */ 727 #define CPUID_7_0_EBX_SHA_NI (1U << 29) 728 /* AVX-512 Byte and Word Instructions */ 729 #define CPUID_7_0_EBX_AVX512BW (1U << 30) 730 /* AVX-512 Vector Length Extensions */ 731 #define CPUID_7_0_EBX_AVX512VL (1U << 31) 732 733 /* AVX-512 Vector Byte Manipulation Instruction */ 734 #define CPUID_7_0_ECX_AVX512_VBMI (1U << 1) 735 /* User-Mode Instruction Prevention */ 736 #define CPUID_7_0_ECX_UMIP (1U << 2) 737 /* Protection Keys for User-mode Pages */ 738 #define CPUID_7_0_ECX_PKU (1U << 3) 739 /* OS Enable Protection Keys */ 740 #define CPUID_7_0_ECX_OSPKE (1U << 4) 741 /* UMONITOR/UMWAIT/TPAUSE Instructions */ 742 #define CPUID_7_0_ECX_WAITPKG (1U << 5) 743 /* Additional AVX-512 Vector Byte Manipulation Instruction */ 744 #define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6) 745 /* Galois Field New Instructions */ 746 #define CPUID_7_0_ECX_GFNI (1U << 8) 747 /* Vector AES Instructions */ 748 #define CPUID_7_0_ECX_VAES (1U << 9) 749 /* Carry-Less Multiplication Quadword */ 750 #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) 751 /* Vector Neural Network Instructions */ 752 #define CPUID_7_0_ECX_AVX512VNNI (1U << 11) 753 /* Support for VPOPCNT[B,W] and VPSHUFBITQMB */ 754 #define CPUID_7_0_ECX_AVX512BITALG (1U << 12) 755 /* POPCNT for vectors of DW/QW */ 756 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) 757 /* 5-level Page Tables */ 758 #define CPUID_7_0_ECX_LA57 (1U << 16) 759 /* Read Processor ID */ 760 #define CPUID_7_0_ECX_RDPID (1U << 22) 761 /* Cache Line Demote Instruction */ 762 #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) 763 /* Move Doubleword as Direct Store Instruction */ 764 #define CPUID_7_0_ECX_MOVDIRI (1U << 27) 765 /* Move 64 Bytes as Direct Store Instruction */ 766 #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) 767 768 /* AVX512 Neural Network Instructions */ 769 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) 770 /* AVX512 Multiply Accumulation Single Precision */ 771 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) 772 /* Speculation Control */ 773 #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) 774 /* Single Thread Indirect Branch Predictors */ 775 #define CPUID_7_0_EDX_STIBP (1U << 27) 776 /* Arch Capabilities */ 777 #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) 778 /* Core Capability */ 779 #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) 780 /* Speculative Store Bypass Disable */ 781 #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) 782 783 /* AVX512 BFloat16 Instruction */ 784 #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) 785 786 /* CLZERO instruction */ 787 #define CPUID_8000_0008_EBX_CLZERO (1U << 0) 788 /* Always save/restore FP error pointers */ 789 #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2) 790 /* Write back and do not invalidate cache */ 791 #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) 792 /* Indirect Branch Prediction Barrier */ 793 #define CPUID_8000_0008_EBX_IBPB (1U << 12) 794 795 #define CPUID_XSAVE_XSAVEOPT (1U << 0) 796 #define CPUID_XSAVE_XSAVEC (1U << 1) 797 #define CPUID_XSAVE_XGETBV1 (1U << 2) 798 #define CPUID_XSAVE_XSAVES (1U << 3) 799 800 #define CPUID_6_EAX_ARAT (1U << 2) 801 802 /* CPUID[0x80000007].EDX flags: */ 803 #define CPUID_APM_INVTSC (1U << 8) 804 805 #define CPUID_VENDOR_SZ 12 806 807 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ 808 #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ 809 #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ 810 #define CPUID_VENDOR_INTEL "GenuineIntel" 811 812 #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ 813 #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ 814 #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ 815 #define CPUID_VENDOR_AMD "AuthenticAMD" 816 817 #define CPUID_VENDOR_VIA "CentaurHauls" 818 819 #define CPUID_VENDOR_HYGON "HygonGenuine" 820 821 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ 822 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ 823 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) 824 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ 825 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ 826 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) 827 828 #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ 829 #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ 830 831 /* CPUID[0xB].ECX level types */ 832 #define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8) 833 #define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8) 834 #define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8) 835 #define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8) 836 837 /* MSR Feature Bits */ 838 #define MSR_ARCH_CAP_RDCL_NO (1U << 0) 839 #define MSR_ARCH_CAP_IBRS_ALL (1U << 1) 840 #define MSR_ARCH_CAP_RSBA (1U << 2) 841 #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) 842 #define MSR_ARCH_CAP_SSB_NO (1U << 4) 843 #define MSR_ARCH_CAP_MDS_NO (1U << 5) 844 845 #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) 846 847 /* VMX MSR features */ 848 #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull 849 #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32) 850 #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32) 851 #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49) 852 #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54) 853 #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55) 854 855 #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full 856 #define MSR_VMX_MISC_STORE_LMA (1ULL << 5) 857 #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6) 858 #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7) 859 #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8) 860 #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull 861 #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29) 862 #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30) 863 864 #define MSR_VMX_EPT_EXECONLY (1ULL << 0) 865 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6) 866 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7) 867 #define MSR_VMX_EPT_UC (1ULL << 8) 868 #define MSR_VMX_EPT_WB (1ULL << 14) 869 #define MSR_VMX_EPT_2MB (1ULL << 16) 870 #define MSR_VMX_EPT_1GB (1ULL << 17) 871 #define MSR_VMX_EPT_INVEPT (1ULL << 20) 872 #define MSR_VMX_EPT_AD_BITS (1ULL << 21) 873 #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22) 874 #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25) 875 #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26) 876 #define MSR_VMX_EPT_INVVPID (1ULL << 32) 877 #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40) 878 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41) 879 #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42) 880 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43) 881 882 #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0) 883 884 885 /* VMX controls */ 886 #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 887 #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008 888 #define VMX_CPU_BASED_HLT_EXITING 0x00000080 889 #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200 890 #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400 891 #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800 892 #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000 893 #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000 894 #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000 895 #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000 896 #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000 897 #define VMX_CPU_BASED_TPR_SHADOW 0x00200000 898 #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 899 #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000 900 #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000 901 #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000 902 #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 903 #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000 904 #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000 905 #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000 906 #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 907 908 #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 909 #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002 910 #define VMX_SECONDARY_EXEC_DESC 0x00000004 911 #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008 912 #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 913 #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020 914 #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040 915 #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 916 #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 917 #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 918 #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 919 #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800 920 #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 921 #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 922 #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000 923 #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000 924 #define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000 925 #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000 926 #define VMX_SECONDARY_EXEC_XSAVES 0x00100000 927 928 #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001 929 #define VMX_PIN_BASED_NMI_EXITING 0x00000008 930 #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020 931 #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 932 #define VMX_PIN_BASED_POSTED_INTR 0x00000080 933 934 #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 935 #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 936 #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 937 #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 938 #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000 939 #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000 940 #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000 941 #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000 942 #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 943 #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000 944 #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 945 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 946 947 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 948 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 949 #define VMX_VM_ENTRY_SMM 0x00000400 950 #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 951 #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 952 #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000 953 #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000 954 #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000 955 #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000 956 #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 957 958 /* Supported Hyper-V Enlightenments */ 959 #define HYPERV_FEAT_RELAXED 0 960 #define HYPERV_FEAT_VAPIC 1 961 #define HYPERV_FEAT_TIME 2 962 #define HYPERV_FEAT_CRASH 3 963 #define HYPERV_FEAT_RESET 4 964 #define HYPERV_FEAT_VPINDEX 5 965 #define HYPERV_FEAT_RUNTIME 6 966 #define HYPERV_FEAT_SYNIC 7 967 #define HYPERV_FEAT_STIMER 8 968 #define HYPERV_FEAT_FREQUENCIES 9 969 #define HYPERV_FEAT_REENLIGHTENMENT 10 970 #define HYPERV_FEAT_TLBFLUSH 11 971 #define HYPERV_FEAT_EVMCS 12 972 #define HYPERV_FEAT_IPI 13 973 #define HYPERV_FEAT_STIMER_DIRECT 14 974 975 #ifndef HYPERV_SPINLOCK_NEVER_RETRY 976 #define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF 977 #endif 978 979 #define EXCP00_DIVZ 0 980 #define EXCP01_DB 1 981 #define EXCP02_NMI 2 982 #define EXCP03_INT3 3 983 #define EXCP04_INTO 4 984 #define EXCP05_BOUND 5 985 #define EXCP06_ILLOP 6 986 #define EXCP07_PREX 7 987 #define EXCP08_DBLE 8 988 #define EXCP09_XERR 9 989 #define EXCP0A_TSS 10 990 #define EXCP0B_NOSEG 11 991 #define EXCP0C_STACK 12 992 #define EXCP0D_GPF 13 993 #define EXCP0E_PAGE 14 994 #define EXCP10_COPR 16 995 #define EXCP11_ALGN 17 996 #define EXCP12_MCHK 18 997 998 #define EXCP_SYSCALL 0x100 /* only happens in user only emulation 999 for syscall instruction */ 1000 #define EXCP_VMEXIT 0x100 1001 1002 /* i386-specific interrupt pending bits. */ 1003 #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 1004 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 1005 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 1006 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 1007 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 1008 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 1009 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 1010 1011 /* Use a clearer name for this. */ 1012 #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET 1013 1014 /* Instead of computing the condition codes after each x86 instruction, 1015 * QEMU just stores one operand (called CC_SRC), the result 1016 * (called CC_DST) and the type of operation (called CC_OP). When the 1017 * condition codes are needed, the condition codes can be calculated 1018 * using this information. Condition codes are not generated if they 1019 * are only needed for conditional branches. 1020 */ 1021 typedef enum { 1022 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ 1023 CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ 1024 1025 CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ 1026 CC_OP_MULW, 1027 CC_OP_MULL, 1028 CC_OP_MULQ, 1029 1030 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1031 CC_OP_ADDW, 1032 CC_OP_ADDL, 1033 CC_OP_ADDQ, 1034 1035 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1036 CC_OP_ADCW, 1037 CC_OP_ADCL, 1038 CC_OP_ADCQ, 1039 1040 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1041 CC_OP_SUBW, 1042 CC_OP_SUBL, 1043 CC_OP_SUBQ, 1044 1045 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1046 CC_OP_SBBW, 1047 CC_OP_SBBL, 1048 CC_OP_SBBQ, 1049 1050 CC_OP_LOGICB, /* modify all flags, CC_DST = res */ 1051 CC_OP_LOGICW, 1052 CC_OP_LOGICL, 1053 CC_OP_LOGICQ, 1054 1055 CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1056 CC_OP_INCW, 1057 CC_OP_INCL, 1058 CC_OP_INCQ, 1059 1060 CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1061 CC_OP_DECW, 1062 CC_OP_DECL, 1063 CC_OP_DECQ, 1064 1065 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ 1066 CC_OP_SHLW, 1067 CC_OP_SHLL, 1068 CC_OP_SHLQ, 1069 1070 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ 1071 CC_OP_SARW, 1072 CC_OP_SARL, 1073 CC_OP_SARQ, 1074 1075 CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ 1076 CC_OP_BMILGW, 1077 CC_OP_BMILGL, 1078 CC_OP_BMILGQ, 1079 1080 CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ 1081 CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ 1082 CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ 1083 1084 CC_OP_CLR, /* Z set, all other flags clear. */ 1085 CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */ 1086 1087 CC_OP_NB, 1088 } CCOp; 1089 1090 typedef struct SegmentCache { 1091 uint32_t selector; 1092 target_ulong base; 1093 uint32_t limit; 1094 uint32_t flags; 1095 } SegmentCache; 1096 1097 #define MMREG_UNION(n, bits) \ 1098 union n { \ 1099 uint8_t _b_##n[(bits)/8]; \ 1100 uint16_t _w_##n[(bits)/16]; \ 1101 uint32_t _l_##n[(bits)/32]; \ 1102 uint64_t _q_##n[(bits)/64]; \ 1103 float32 _s_##n[(bits)/32]; \ 1104 float64 _d_##n[(bits)/64]; \ 1105 } 1106 1107 typedef union { 1108 uint8_t _b[16]; 1109 uint16_t _w[8]; 1110 uint32_t _l[4]; 1111 uint64_t _q[2]; 1112 } XMMReg; 1113 1114 typedef union { 1115 uint8_t _b[32]; 1116 uint16_t _w[16]; 1117 uint32_t _l[8]; 1118 uint64_t _q[4]; 1119 } YMMReg; 1120 1121 typedef MMREG_UNION(ZMMReg, 512) ZMMReg; 1122 typedef MMREG_UNION(MMXReg, 64) MMXReg; 1123 1124 typedef struct BNDReg { 1125 uint64_t lb; 1126 uint64_t ub; 1127 } BNDReg; 1128 1129 typedef struct BNDCSReg { 1130 uint64_t cfgu; 1131 uint64_t sts; 1132 } BNDCSReg; 1133 1134 #define BNDCFG_ENABLE 1ULL 1135 #define BNDCFG_BNDPRESERVE 2ULL 1136 #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK 1137 1138 #ifdef HOST_WORDS_BIGENDIAN 1139 #define ZMM_B(n) _b_ZMMReg[63 - (n)] 1140 #define ZMM_W(n) _w_ZMMReg[31 - (n)] 1141 #define ZMM_L(n) _l_ZMMReg[15 - (n)] 1142 #define ZMM_S(n) _s_ZMMReg[15 - (n)] 1143 #define ZMM_Q(n) _q_ZMMReg[7 - (n)] 1144 #define ZMM_D(n) _d_ZMMReg[7 - (n)] 1145 1146 #define MMX_B(n) _b_MMXReg[7 - (n)] 1147 #define MMX_W(n) _w_MMXReg[3 - (n)] 1148 #define MMX_L(n) _l_MMXReg[1 - (n)] 1149 #define MMX_S(n) _s_MMXReg[1 - (n)] 1150 #else 1151 #define ZMM_B(n) _b_ZMMReg[n] 1152 #define ZMM_W(n) _w_ZMMReg[n] 1153 #define ZMM_L(n) _l_ZMMReg[n] 1154 #define ZMM_S(n) _s_ZMMReg[n] 1155 #define ZMM_Q(n) _q_ZMMReg[n] 1156 #define ZMM_D(n) _d_ZMMReg[n] 1157 1158 #define MMX_B(n) _b_MMXReg[n] 1159 #define MMX_W(n) _w_MMXReg[n] 1160 #define MMX_L(n) _l_MMXReg[n] 1161 #define MMX_S(n) _s_MMXReg[n] 1162 #endif 1163 #define MMX_Q(n) _q_MMXReg[n] 1164 1165 typedef union { 1166 floatx80 d __attribute__((aligned(16))); 1167 MMXReg mmx; 1168 } FPReg; 1169 1170 typedef struct { 1171 uint64_t base; 1172 uint64_t mask; 1173 } MTRRVar; 1174 1175 #define CPU_NB_REGS64 16 1176 #define CPU_NB_REGS32 8 1177 1178 #ifdef TARGET_X86_64 1179 #define CPU_NB_REGS CPU_NB_REGS64 1180 #else 1181 #define CPU_NB_REGS CPU_NB_REGS32 1182 #endif 1183 1184 #define MAX_FIXED_COUNTERS 3 1185 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) 1186 1187 #define TARGET_INSN_START_EXTRA_WORDS 1 1188 1189 #define NB_OPMASK_REGS 8 1190 1191 /* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish 1192 * that APIC ID hasn't been set yet 1193 */ 1194 #define UNASSIGNED_APIC_ID 0xFFFFFFFF 1195 1196 typedef union X86LegacyXSaveArea { 1197 struct { 1198 uint16_t fcw; 1199 uint16_t fsw; 1200 uint8_t ftw; 1201 uint8_t reserved; 1202 uint16_t fpop; 1203 uint64_t fpip; 1204 uint64_t fpdp; 1205 uint32_t mxcsr; 1206 uint32_t mxcsr_mask; 1207 FPReg fpregs[8]; 1208 uint8_t xmm_regs[16][16]; 1209 }; 1210 uint8_t data[512]; 1211 } X86LegacyXSaveArea; 1212 1213 typedef struct X86XSaveHeader { 1214 uint64_t xstate_bv; 1215 uint64_t xcomp_bv; 1216 uint64_t reserve0; 1217 uint8_t reserved[40]; 1218 } X86XSaveHeader; 1219 1220 /* Ext. save area 2: AVX State */ 1221 typedef struct XSaveAVX { 1222 uint8_t ymmh[16][16]; 1223 } XSaveAVX; 1224 1225 /* Ext. save area 3: BNDREG */ 1226 typedef struct XSaveBNDREG { 1227 BNDReg bnd_regs[4]; 1228 } XSaveBNDREG; 1229 1230 /* Ext. save area 4: BNDCSR */ 1231 typedef union XSaveBNDCSR { 1232 BNDCSReg bndcsr; 1233 uint8_t data[64]; 1234 } XSaveBNDCSR; 1235 1236 /* Ext. save area 5: Opmask */ 1237 typedef struct XSaveOpmask { 1238 uint64_t opmask_regs[NB_OPMASK_REGS]; 1239 } XSaveOpmask; 1240 1241 /* Ext. save area 6: ZMM_Hi256 */ 1242 typedef struct XSaveZMM_Hi256 { 1243 uint8_t zmm_hi256[16][32]; 1244 } XSaveZMM_Hi256; 1245 1246 /* Ext. save area 7: Hi16_ZMM */ 1247 typedef struct XSaveHi16_ZMM { 1248 uint8_t hi16_zmm[16][64]; 1249 } XSaveHi16_ZMM; 1250 1251 /* Ext. save area 9: PKRU state */ 1252 typedef struct XSavePKRU { 1253 uint32_t pkru; 1254 uint32_t padding; 1255 } XSavePKRU; 1256 1257 typedef struct X86XSaveArea { 1258 X86LegacyXSaveArea legacy; 1259 X86XSaveHeader header; 1260 1261 /* Extended save areas: */ 1262 1263 /* AVX State: */ 1264 XSaveAVX avx_state; 1265 uint8_t padding[960 - 576 - sizeof(XSaveAVX)]; 1266 /* MPX State: */ 1267 XSaveBNDREG bndreg_state; 1268 XSaveBNDCSR bndcsr_state; 1269 /* AVX-512 State: */ 1270 XSaveOpmask opmask_state; 1271 XSaveZMM_Hi256 zmm_hi256_state; 1272 XSaveHi16_ZMM hi16_zmm_state; 1273 /* PKRU State: */ 1274 XSavePKRU pkru_state; 1275 } X86XSaveArea; 1276 1277 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240); 1278 QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); 1279 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0); 1280 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); 1281 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400); 1282 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); 1283 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440); 1284 QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); 1285 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480); 1286 QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); 1287 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680); 1288 QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); 1289 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80); 1290 QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); 1291 1292 typedef enum TPRAccess { 1293 TPR_ACCESS_READ, 1294 TPR_ACCESS_WRITE, 1295 } TPRAccess; 1296 1297 /* Cache information data structures: */ 1298 1299 enum CacheType { 1300 DATA_CACHE, 1301 INSTRUCTION_CACHE, 1302 UNIFIED_CACHE 1303 }; 1304 1305 typedef struct CPUCacheInfo { 1306 enum CacheType type; 1307 uint8_t level; 1308 /* Size in bytes */ 1309 uint32_t size; 1310 /* Line size, in bytes */ 1311 uint16_t line_size; 1312 /* 1313 * Associativity. 1314 * Note: representation of fully-associative caches is not implemented 1315 */ 1316 uint8_t associativity; 1317 /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ 1318 uint8_t partitions; 1319 /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ 1320 uint32_t sets; 1321 /* 1322 * Lines per tag. 1323 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. 1324 * (Is this synonym to @partitions?) 1325 */ 1326 uint8_t lines_per_tag; 1327 1328 /* Self-initializing cache */ 1329 bool self_init; 1330 /* 1331 * WBINVD/INVD is not guaranteed to act upon lower level caches of 1332 * non-originating threads sharing this cache. 1333 * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] 1334 */ 1335 bool no_invd_sharing; 1336 /* 1337 * Cache is inclusive of lower cache levels. 1338 * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. 1339 */ 1340 bool inclusive; 1341 /* 1342 * A complex function is used to index the cache, potentially using all 1343 * address bits. CPUID[4].EDX[bit 2]. 1344 */ 1345 bool complex_indexing; 1346 } CPUCacheInfo; 1347 1348 1349 typedef struct CPUCaches { 1350 CPUCacheInfo *l1d_cache; 1351 CPUCacheInfo *l1i_cache; 1352 CPUCacheInfo *l2_cache; 1353 CPUCacheInfo *l3_cache; 1354 } CPUCaches; 1355 1356 typedef struct CPUX86State { 1357 /* standard registers */ 1358 target_ulong regs[CPU_NB_REGS]; 1359 target_ulong eip; 1360 target_ulong eflags; /* eflags register. During CPU emulation, CC 1361 flags and DF are set to zero because they are 1362 stored elsewhere */ 1363 1364 /* emulator internal eflags handling */ 1365 target_ulong cc_dst; 1366 target_ulong cc_src; 1367 target_ulong cc_src2; 1368 uint32_t cc_op; 1369 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ 1370 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags 1371 are known at translation time. */ 1372 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ 1373 1374 /* segments */ 1375 SegmentCache segs[6]; /* selector values */ 1376 SegmentCache ldt; 1377 SegmentCache tr; 1378 SegmentCache gdt; /* only base and limit are used */ 1379 SegmentCache idt; /* only base and limit are used */ 1380 1381 target_ulong cr[5]; /* NOTE: cr1 is unused */ 1382 int32_t a20_mask; 1383 1384 BNDReg bnd_regs[4]; 1385 BNDCSReg bndcs_regs; 1386 uint64_t msr_bndcfgs; 1387 uint64_t efer; 1388 1389 /* Beginning of state preserved by INIT (dummy marker). */ 1390 struct {} start_init_save; 1391 1392 /* FPU state */ 1393 unsigned int fpstt; /* top of stack index */ 1394 uint16_t fpus; 1395 uint16_t fpuc; 1396 uint8_t fptags[8]; /* 0 = valid, 1 = empty */ 1397 FPReg fpregs[8]; 1398 /* KVM-only so far */ 1399 uint16_t fpop; 1400 uint64_t fpip; 1401 uint64_t fpdp; 1402 1403 /* emulator internal variables */ 1404 float_status fp_status; 1405 floatx80 ft0; 1406 1407 float_status mmx_status; /* for 3DNow! float ops */ 1408 float_status sse_status; 1409 uint32_t mxcsr; 1410 ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32]; 1411 ZMMReg xmm_t0; 1412 MMXReg mmx_t0; 1413 1414 XMMReg ymmh_regs[CPU_NB_REGS]; 1415 1416 uint64_t opmask_regs[NB_OPMASK_REGS]; 1417 YMMReg zmmh_regs[CPU_NB_REGS]; 1418 ZMMReg hi16_zmm_regs[CPU_NB_REGS]; 1419 1420 /* sysenter registers */ 1421 uint32_t sysenter_cs; 1422 target_ulong sysenter_esp; 1423 target_ulong sysenter_eip; 1424 uint64_t star; 1425 1426 uint64_t vm_hsave; 1427 1428 #ifdef TARGET_X86_64 1429 target_ulong lstar; 1430 target_ulong cstar; 1431 target_ulong fmask; 1432 target_ulong kernelgsbase; 1433 #endif 1434 1435 uint64_t tsc; 1436 uint64_t tsc_adjust; 1437 uint64_t tsc_deadline; 1438 uint64_t tsc_aux; 1439 1440 uint64_t xcr0; 1441 1442 uint64_t mcg_status; 1443 uint64_t msr_ia32_misc_enable; 1444 uint64_t msr_ia32_feature_control; 1445 1446 uint64_t msr_fixed_ctr_ctrl; 1447 uint64_t msr_global_ctrl; 1448 uint64_t msr_global_status; 1449 uint64_t msr_global_ovf_ctrl; 1450 uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; 1451 uint64_t msr_gp_counters[MAX_GP_COUNTERS]; 1452 uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; 1453 1454 uint64_t pat; 1455 uint32_t smbase; 1456 uint64_t msr_smi_count; 1457 1458 uint32_t pkru; 1459 uint32_t tsx_ctrl; 1460 1461 uint64_t spec_ctrl; 1462 uint64_t virt_ssbd; 1463 1464 /* End of state preserved by INIT (dummy marker). */ 1465 struct {} end_init_save; 1466 1467 uint64_t system_time_msr; 1468 uint64_t wall_clock_msr; 1469 uint64_t steal_time_msr; 1470 uint64_t async_pf_en_msr; 1471 uint64_t pv_eoi_en_msr; 1472 uint64_t poll_control_msr; 1473 1474 /* Partition-wide HV MSRs, will be updated only on the first vcpu */ 1475 uint64_t msr_hv_hypercall; 1476 uint64_t msr_hv_guest_os_id; 1477 uint64_t msr_hv_tsc; 1478 1479 /* Per-VCPU HV MSRs */ 1480 uint64_t msr_hv_vapic; 1481 uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; 1482 uint64_t msr_hv_runtime; 1483 uint64_t msr_hv_synic_control; 1484 uint64_t msr_hv_synic_evt_page; 1485 uint64_t msr_hv_synic_msg_page; 1486 uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; 1487 uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; 1488 uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; 1489 uint64_t msr_hv_reenlightenment_control; 1490 uint64_t msr_hv_tsc_emulation_control; 1491 uint64_t msr_hv_tsc_emulation_status; 1492 1493 uint64_t msr_rtit_ctrl; 1494 uint64_t msr_rtit_status; 1495 uint64_t msr_rtit_output_base; 1496 uint64_t msr_rtit_output_mask; 1497 uint64_t msr_rtit_cr3_match; 1498 uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; 1499 1500 /* exception/interrupt handling */ 1501 int error_code; 1502 int exception_is_int; 1503 target_ulong exception_next_eip; 1504 target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ 1505 union { 1506 struct CPUBreakpoint *cpu_breakpoint[4]; 1507 struct CPUWatchpoint *cpu_watchpoint[4]; 1508 }; /* break/watchpoints for dr[0..3] */ 1509 int old_exception; /* exception in flight */ 1510 1511 uint64_t vm_vmcb; 1512 uint64_t tsc_offset; 1513 uint64_t intercept; 1514 uint16_t intercept_cr_read; 1515 uint16_t intercept_cr_write; 1516 uint16_t intercept_dr_read; 1517 uint16_t intercept_dr_write; 1518 uint32_t intercept_exceptions; 1519 uint64_t nested_cr3; 1520 uint32_t nested_pg_mode; 1521 uint8_t v_tpr; 1522 1523 /* KVM states, automatically cleared on reset */ 1524 uint8_t nmi_injected; 1525 uint8_t nmi_pending; 1526 1527 uintptr_t retaddr; 1528 1529 /* Fields up to this point are cleared by a CPU reset */ 1530 struct {} end_reset_fields; 1531 1532 /* Fields after this point are preserved across CPU reset. */ 1533 1534 /* processor features (e.g. for CPUID insn) */ 1535 /* Minimum cpuid leaf 7 value */ 1536 uint32_t cpuid_level_func7; 1537 /* Actual cpuid leaf 7 value */ 1538 uint32_t cpuid_min_level_func7; 1539 /* Minimum level/xlevel/xlevel2, based on CPU model + features */ 1540 uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; 1541 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ 1542 uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; 1543 /* Actual level/xlevel/xlevel2 value: */ 1544 uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; 1545 uint32_t cpuid_vendor1; 1546 uint32_t cpuid_vendor2; 1547 uint32_t cpuid_vendor3; 1548 uint32_t cpuid_version; 1549 FeatureWordArray features; 1550 /* Features that were explicitly enabled/disabled */ 1551 FeatureWordArray user_features; 1552 uint32_t cpuid_model[12]; 1553 /* Cache information for CPUID. When legacy-cache=on, the cache data 1554 * on each CPUID leaf will be different, because we keep compatibility 1555 * with old QEMU versions. 1556 */ 1557 CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; 1558 1559 /* MTRRs */ 1560 uint64_t mtrr_fixed[11]; 1561 uint64_t mtrr_deftype; 1562 MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; 1563 1564 /* For KVM */ 1565 uint32_t mp_state; 1566 int32_t exception_nr; 1567 int32_t interrupt_injected; 1568 uint8_t soft_interrupt; 1569 uint8_t exception_pending; 1570 uint8_t exception_injected; 1571 uint8_t has_error_code; 1572 uint8_t exception_has_payload; 1573 uint64_t exception_payload; 1574 uint32_t ins_len; 1575 uint32_t sipi_vector; 1576 bool tsc_valid; 1577 int64_t tsc_khz; 1578 int64_t user_tsc_khz; /* for sanity check only */ 1579 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 1580 void *xsave_buf; 1581 #endif 1582 #if defined(CONFIG_KVM) 1583 struct kvm_nested_state *nested_state; 1584 #endif 1585 #if defined(CONFIG_HVF) 1586 HVFX86EmulatorState *hvf_emul; 1587 #endif 1588 1589 uint64_t mcg_cap; 1590 uint64_t mcg_ctl; 1591 uint64_t mcg_ext_ctl; 1592 uint64_t mce_banks[MCE_BANKS_DEF*4]; 1593 uint64_t xstate_bv; 1594 1595 /* vmstate */ 1596 uint16_t fpus_vmstate; 1597 uint16_t fptag_vmstate; 1598 uint16_t fpregs_format_vmstate; 1599 1600 uint64_t xss; 1601 uint32_t umwait; 1602 1603 TPRAccess tpr_access_type; 1604 1605 unsigned nr_dies; 1606 } CPUX86State; 1607 1608 struct kvm_msrs; 1609 1610 /** 1611 * X86CPU: 1612 * @env: #CPUX86State 1613 * @migratable: If set, only migratable flags will be accepted when "enforce" 1614 * mode is used, and only migratable flags will be included in the "host" 1615 * CPU model. 1616 * 1617 * An x86 CPU. 1618 */ 1619 struct X86CPU { 1620 /*< private >*/ 1621 CPUState parent_obj; 1622 /*< public >*/ 1623 1624 CPUNegativeOffsetState neg; 1625 CPUX86State env; 1626 1627 uint32_t hyperv_spinlock_attempts; 1628 char *hyperv_vendor_id; 1629 bool hyperv_synic_kvm_only; 1630 uint64_t hyperv_features; 1631 bool hyperv_passthrough; 1632 OnOffAuto hyperv_no_nonarch_cs; 1633 1634 bool check_cpuid; 1635 bool enforce_cpuid; 1636 /* 1637 * Force features to be enabled even if the host doesn't support them. 1638 * This is dangerous and should be done only for testing CPUID 1639 * compatibility. 1640 */ 1641 bool force_features; 1642 bool expose_kvm; 1643 bool expose_tcg; 1644 bool migratable; 1645 bool migrate_smi_count; 1646 bool max_features; /* Enable all supported features automatically */ 1647 uint32_t apic_id; 1648 1649 /* Enables publishing of TSC increment and Local APIC bus frequencies to 1650 * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */ 1651 bool vmware_cpuid_freq; 1652 1653 /* if true the CPUID code directly forward host cache leaves to the guest */ 1654 bool cache_info_passthrough; 1655 1656 /* if true the CPUID code directly forwards 1657 * host monitor/mwait leaves to the guest */ 1658 struct { 1659 uint32_t eax; 1660 uint32_t ebx; 1661 uint32_t ecx; 1662 uint32_t edx; 1663 } mwait; 1664 1665 /* Features that were filtered out because of missing host capabilities */ 1666 FeatureWordArray filtered_features; 1667 1668 /* Enable PMU CPUID bits. This can't be enabled by default yet because 1669 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID 1670 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel 1671 * capabilities) directly to the guest. 1672 */ 1673 bool enable_pmu; 1674 1675 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is 1676 * disabled by default to avoid breaking migration between QEMU with 1677 * different LMCE configurations. 1678 */ 1679 bool enable_lmce; 1680 1681 /* Compatibility bits for old machine types. 1682 * If true present virtual l3 cache for VM, the vcpus in the same virtual 1683 * socket share an virtual l3 cache. 1684 */ 1685 bool enable_l3_cache; 1686 1687 /* Compatibility bits for old machine types. 1688 * If true present the old cache topology information 1689 */ 1690 bool legacy_cache; 1691 1692 /* Compatibility bits for old machine types: */ 1693 bool enable_cpuid_0xb; 1694 1695 /* Enable auto level-increase for all CPUID leaves */ 1696 bool full_cpuid_auto_level; 1697 1698 /* Enable auto level-increase for Intel Processor Trace leave */ 1699 bool intel_pt_auto_level; 1700 1701 /* if true fill the top bits of the MTRR_PHYSMASKn variable range */ 1702 bool fill_mtrr_mask; 1703 1704 /* if true override the phys_bits value with a value read from the host */ 1705 bool host_phys_bits; 1706 1707 /* if set, limit maximum value for phys_bits when host_phys_bits is true */ 1708 uint8_t host_phys_bits_limit; 1709 1710 /* Stop SMI delivery for migration compatibility with old machines */ 1711 bool kvm_no_smi_migration; 1712 1713 /* Number of physical address bits supported */ 1714 uint32_t phys_bits; 1715 1716 /* in order to simplify APIC support, we leave this pointer to the 1717 user */ 1718 struct DeviceState *apic_state; 1719 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; 1720 Notifier machine_done; 1721 1722 struct kvm_msrs *kvm_msr_buf; 1723 1724 int32_t node_id; /* NUMA node this CPU belongs to */ 1725 int32_t socket_id; 1726 int32_t die_id; 1727 int32_t core_id; 1728 int32_t thread_id; 1729 1730 int32_t hv_max_vps; 1731 }; 1732 1733 1734 #ifndef CONFIG_USER_ONLY 1735 extern VMStateDescription vmstate_x86_cpu; 1736 #endif 1737 1738 /** 1739 * x86_cpu_do_interrupt: 1740 * @cpu: vCPU the interrupt is to be handled by. 1741 */ 1742 void x86_cpu_do_interrupt(CPUState *cpu); 1743 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); 1744 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); 1745 1746 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, 1747 int cpuid, void *opaque); 1748 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, 1749 int cpuid, void *opaque); 1750 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 1751 void *opaque); 1752 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 1753 void *opaque); 1754 1755 void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, 1756 Error **errp); 1757 1758 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); 1759 1760 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, 1761 MemTxAttrs *attrs); 1762 1763 int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); 1764 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 1765 1766 void x86_cpu_exec_enter(CPUState *cpu); 1767 void x86_cpu_exec_exit(CPUState *cpu); 1768 1769 void x86_cpu_list(void); 1770 int cpu_x86_support_mca_broadcast(CPUX86State *env); 1771 1772 int cpu_get_pic_interrupt(CPUX86State *s); 1773 /* MSDOS compatibility mode FPU exception support */ 1774 void x86_register_ferr_irq(qemu_irq irq); 1775 void cpu_set_ignne(void); 1776 /* mpx_helper.c */ 1777 void cpu_sync_bndcs_hflags(CPUX86State *env); 1778 1779 /* this function must always be used to load data in the segment 1780 cache: it synchronizes the hflags with the segment cache values */ 1781 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 1782 int seg_reg, unsigned int selector, 1783 target_ulong base, 1784 unsigned int limit, 1785 unsigned int flags) 1786 { 1787 SegmentCache *sc; 1788 unsigned int new_hflags; 1789 1790 sc = &env->segs[seg_reg]; 1791 sc->selector = selector; 1792 sc->base = base; 1793 sc->limit = limit; 1794 sc->flags = flags; 1795 1796 /* update the hidden flags */ 1797 { 1798 if (seg_reg == R_CS) { 1799 #ifdef TARGET_X86_64 1800 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { 1801 /* long mode */ 1802 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 1803 env->hflags &= ~(HF_ADDSEG_MASK); 1804 } else 1805 #endif 1806 { 1807 /* legacy / compatibility case */ 1808 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) 1809 >> (DESC_B_SHIFT - HF_CS32_SHIFT); 1810 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | 1811 new_hflags; 1812 } 1813 } 1814 if (seg_reg == R_SS) { 1815 int cpl = (flags >> DESC_DPL_SHIFT) & 3; 1816 #if HF_CPL_MASK != 3 1817 #error HF_CPL_MASK is hardcoded 1818 #endif 1819 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; 1820 /* Possibly switch between BNDCFGS and BNDCFGU */ 1821 cpu_sync_bndcs_hflags(env); 1822 } 1823 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) 1824 >> (DESC_B_SHIFT - HF_SS32_SHIFT); 1825 if (env->hflags & HF_CS64_MASK) { 1826 /* zero base assumed for DS, ES and SS in long mode */ 1827 } else if (!(env->cr[0] & CR0_PE_MASK) || 1828 (env->eflags & VM_MASK) || 1829 !(env->hflags & HF_CS32_MASK)) { 1830 /* XXX: try to avoid this test. The problem comes from the 1831 fact that is real mode or vm86 mode we only modify the 1832 'base' and 'selector' fields of the segment cache to go 1833 faster. A solution may be to force addseg to one in 1834 translate-i386.c. */ 1835 new_hflags |= HF_ADDSEG_MASK; 1836 } else { 1837 new_hflags |= ((env->segs[R_DS].base | 1838 env->segs[R_ES].base | 1839 env->segs[R_SS].base) != 0) << 1840 HF_ADDSEG_SHIFT; 1841 } 1842 env->hflags = (env->hflags & 1843 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; 1844 } 1845 } 1846 1847 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, 1848 uint8_t sipi_vector) 1849 { 1850 CPUState *cs = CPU(cpu); 1851 CPUX86State *env = &cpu->env; 1852 1853 env->eip = 0; 1854 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, 1855 sipi_vector << 12, 1856 env->segs[R_CS].limit, 1857 env->segs[R_CS].flags); 1858 cs->halted = 0; 1859 } 1860 1861 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 1862 target_ulong *base, unsigned int *limit, 1863 unsigned int *flags); 1864 1865 /* op_helper.c */ 1866 /* used for debug or cpu save/restore */ 1867 1868 /* cpu-exec.c */ 1869 /* the following helpers are only usable in user mode simulation as 1870 they can trigger unexpected exceptions */ 1871 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); 1872 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); 1873 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); 1874 void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); 1875 void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); 1876 1877 /* you can call this signal handler from your SIGBUS and SIGSEGV 1878 signal handlers to inform the virtual CPU of exceptions. non zero 1879 is returned if the signal was handled by the virtual CPU. */ 1880 int cpu_x86_signal_handler(int host_signum, void *pinfo, 1881 void *puc); 1882 1883 /* cpu.c */ 1884 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 1885 uint32_t *eax, uint32_t *ebx, 1886 uint32_t *ecx, uint32_t *edx); 1887 void cpu_clear_apic_feature(CPUX86State *env); 1888 void host_cpuid(uint32_t function, uint32_t count, 1889 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); 1890 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); 1891 1892 /* helper.c */ 1893 bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 1894 MMUAccessType access_type, int mmu_idx, 1895 bool probe, uintptr_t retaddr); 1896 void x86_cpu_set_a20(X86CPU *cpu, int a20_state); 1897 1898 #ifndef CONFIG_USER_ONLY 1899 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) 1900 { 1901 return !!attrs.secure; 1902 } 1903 1904 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) 1905 { 1906 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); 1907 } 1908 1909 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); 1910 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); 1911 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); 1912 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); 1913 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); 1914 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); 1915 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); 1916 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); 1917 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); 1918 #endif 1919 1920 void breakpoint_handler(CPUState *cs); 1921 1922 /* will be suppressed */ 1923 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 1924 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 1925 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 1926 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); 1927 1928 /* hw/pc.c */ 1929 uint64_t cpu_get_tsc(CPUX86State *env); 1930 1931 /* XXX: This value should match the one returned by CPUID 1932 * and in exec.c */ 1933 # if defined(TARGET_X86_64) 1934 # define TCG_PHYS_ADDR_BITS 40 1935 # else 1936 # define TCG_PHYS_ADDR_BITS 36 1937 # endif 1938 1939 #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS) 1940 1941 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU 1942 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) 1943 #define CPU_RESOLVING_TYPE TYPE_X86_CPU 1944 1945 #ifdef TARGET_X86_64 1946 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") 1947 #else 1948 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") 1949 #endif 1950 1951 #define cpu_signal_handler cpu_x86_signal_handler 1952 #define cpu_list x86_cpu_list 1953 1954 /* MMU modes definitions */ 1955 #define MMU_MODE0_SUFFIX _ksmap 1956 #define MMU_MODE1_SUFFIX _user 1957 #define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ 1958 #define MMU_KSMAP_IDX 0 1959 #define MMU_USER_IDX 1 1960 #define MMU_KNOSMAP_IDX 2 1961 static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) 1962 { 1963 return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : 1964 (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) 1965 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; 1966 } 1967 1968 static inline int cpu_mmu_index_kernel(CPUX86State *env) 1969 { 1970 return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : 1971 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) 1972 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; 1973 } 1974 1975 #define CC_DST (env->cc_dst) 1976 #define CC_SRC (env->cc_src) 1977 #define CC_SRC2 (env->cc_src2) 1978 #define CC_OP (env->cc_op) 1979 1980 /* n must be a constant to be efficient */ 1981 static inline target_long lshift(target_long x, int n) 1982 { 1983 if (n >= 0) { 1984 return x << n; 1985 } else { 1986 return x >> (-n); 1987 } 1988 } 1989 1990 /* float macros */ 1991 #define FT0 (env->ft0) 1992 #define ST0 (env->fpregs[env->fpstt].d) 1993 #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) 1994 #define ST1 ST(1) 1995 1996 /* translate.c */ 1997 void tcg_x86_init(void); 1998 1999 typedef CPUX86State CPUArchState; 2000 typedef X86CPU ArchCPU; 2001 2002 #include "exec/cpu-all.h" 2003 #include "svm.h" 2004 2005 #if !defined(CONFIG_USER_ONLY) 2006 #include "hw/i386/apic.h" 2007 #endif 2008 2009 static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, 2010 target_ulong *cs_base, uint32_t *flags) 2011 { 2012 *cs_base = env->segs[R_CS].base; 2013 *pc = *cs_base + env->eip; 2014 *flags = env->hflags | 2015 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); 2016 } 2017 2018 void do_cpu_init(X86CPU *cpu); 2019 void do_cpu_sipi(X86CPU *cpu); 2020 2021 #define MCE_INJECT_BROADCAST 1 2022 #define MCE_INJECT_UNCOND_AO 2 2023 2024 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, 2025 uint64_t status, uint64_t mcg_status, uint64_t addr, 2026 uint64_t misc, int flags); 2027 2028 /* excp_helper.c */ 2029 void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); 2030 void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, 2031 uintptr_t retaddr); 2032 void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, 2033 int error_code); 2034 void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, 2035 int error_code, uintptr_t retaddr); 2036 void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, 2037 int error_code, int next_eip_addend); 2038 2039 /* cc_helper.c */ 2040 extern const uint8_t parity_table[256]; 2041 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); 2042 2043 static inline uint32_t cpu_compute_eflags(CPUX86State *env) 2044 { 2045 uint32_t eflags = env->eflags; 2046 if (tcg_enabled()) { 2047 eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); 2048 } 2049 return eflags; 2050 } 2051 2052 /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS 2053 * after generating a call to a helper that uses this. 2054 */ 2055 static inline void cpu_load_eflags(CPUX86State *env, int eflags, 2056 int update_mask) 2057 { 2058 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 2059 CC_OP = CC_OP_EFLAGS; 2060 env->df = 1 - (2 * ((eflags >> 10) & 1)); 2061 env->eflags = (env->eflags & ~update_mask) | 2062 (eflags & update_mask) | 0x2; 2063 } 2064 2065 /* load efer and update the corresponding hflags. XXX: do consistency 2066 checks with cpuid bits? */ 2067 static inline void cpu_load_efer(CPUX86State *env, uint64_t val) 2068 { 2069 env->efer = val; 2070 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); 2071 if (env->efer & MSR_EFER_LMA) { 2072 env->hflags |= HF_LMA_MASK; 2073 } 2074 if (env->efer & MSR_EFER_SVME) { 2075 env->hflags |= HF_SVME_MASK; 2076 } 2077 } 2078 2079 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) 2080 { 2081 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); 2082 } 2083 2084 static inline int32_t x86_get_a20_mask(CPUX86State *env) 2085 { 2086 if (env->hflags & HF_SMM_MASK) { 2087 return -1; 2088 } else { 2089 return env->a20_mask; 2090 } 2091 } 2092 2093 static inline bool cpu_has_vmx(CPUX86State *env) 2094 { 2095 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; 2096 } 2097 2098 /* 2099 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set. 2100 * Since it was set, CR4.VMXE must remain set as long as vCPU is in 2101 * VMX operation. This is because CR4.VMXE is one of the bits set 2102 * in MSR_IA32_VMX_CR4_FIXED1. 2103 * 2104 * There is one exception to above statement when vCPU enters SMM mode. 2105 * When a vCPU enters SMM mode, it temporarily exit VMX operation and 2106 * may also reset CR4.VMXE during execution in SMM mode. 2107 * When vCPU exits SMM mode, vCPU state is restored to be in VMX operation 2108 * and CR4.VMXE is restored to it's original value of being set. 2109 * 2110 * Therefore, when vCPU is not in SMM mode, we can infer whether 2111 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot 2112 * know for certain. 2113 */ 2114 static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) 2115 { 2116 return cpu_has_vmx(env) && 2117 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); 2118 } 2119 2120 /* fpu_helper.c */ 2121 void update_fp_status(CPUX86State *env); 2122 void update_mxcsr_status(CPUX86State *env); 2123 2124 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) 2125 { 2126 env->mxcsr = mxcsr; 2127 if (tcg_enabled()) { 2128 update_mxcsr_status(env); 2129 } 2130 } 2131 2132 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) 2133 { 2134 env->fpuc = fpuc; 2135 if (tcg_enabled()) { 2136 update_fp_status(env); 2137 } 2138 } 2139 2140 /* mem_helper.c */ 2141 void helper_lock_init(void); 2142 2143 /* svm_helper.c */ 2144 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2145 uint64_t param, uintptr_t retaddr); 2146 void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, 2147 uint64_t exit_info_1, uintptr_t retaddr); 2148 void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1); 2149 2150 /* seg_helper.c */ 2151 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); 2152 2153 /* smm_helper.c */ 2154 void do_smm_enter(X86CPU *cpu); 2155 2156 /* apic.c */ 2157 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); 2158 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, 2159 TPRAccess access); 2160 2161 2162 /* Change the value of a KVM-specific default 2163 * 2164 * If value is NULL, no default will be set and the original 2165 * value from the CPU model table will be kept. 2166 * 2167 * It is valid to call this function only for properties that 2168 * are already present in the kvm_default_props table. 2169 */ 2170 void x86_cpu_change_kvm_default(const char *prop, const char *value); 2171 2172 /* Special values for X86CPUVersion: */ 2173 2174 /* Resolve to latest CPU version */ 2175 #define CPU_VERSION_LATEST -1 2176 2177 /* 2178 * Resolve to version defined by current machine type. 2179 * See x86_cpu_set_default_version() 2180 */ 2181 #define CPU_VERSION_AUTO -2 2182 2183 /* Don't resolve to any versioned CPU models, like old QEMU versions */ 2184 #define CPU_VERSION_LEGACY 0 2185 2186 typedef int X86CPUVersion; 2187 2188 /* 2189 * Set default CPU model version for CPU models having 2190 * version == CPU_VERSION_AUTO. 2191 */ 2192 void x86_cpu_set_default_version(X86CPUVersion version); 2193 2194 /* Return name of 32-bit register, from a R_* constant */ 2195 const char *get_register_name_32(unsigned int reg); 2196 2197 void enable_compat_apic_id_mode(void); 2198 2199 #define APIC_DEFAULT_ADDRESS 0xfee00000 2200 #define APIC_SPACE_SIZE 0x100000 2201 2202 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags); 2203 2204 /* cpu.c */ 2205 bool cpu_is_bsp(X86CPU *cpu); 2206 2207 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf); 2208 void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf); 2209 void x86_update_hflags(CPUX86State* env); 2210 2211 static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat) 2212 { 2213 return !!(cpu->hyperv_features & BIT(feat)); 2214 } 2215 2216 #endif /* I386_CPU_H */ 2217