1 /* 2 * i386 virtual CPU header 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #ifndef I386_CPU_H 21 #define I386_CPU_H 22 23 #include "sysemu/tcg.h" 24 #include "cpu-qom.h" 25 #include "kvm/hyperv-proto.h" 26 #include "exec/cpu-defs.h" 27 #include "hw/i386/topology.h" 28 #include "qapi/qapi-types-common.h" 29 #include "qemu/cpu-float.h" 30 #include "qemu/timer.h" 31 32 #define XEN_NR_VIRQS 24 33 34 #define KVM_HAVE_MCE_INJECTION 1 35 36 /* support for self modifying code even if the modified instruction is 37 close to the modifying instruction */ 38 #define TARGET_HAS_PRECISE_SMC 39 40 #ifdef TARGET_X86_64 41 #define I386_ELF_MACHINE EM_X86_64 42 #define ELF_MACHINE_UNAME "x86_64" 43 #else 44 #define I386_ELF_MACHINE EM_386 45 #define ELF_MACHINE_UNAME "i686" 46 #endif 47 48 enum { 49 R_EAX = 0, 50 R_ECX = 1, 51 R_EDX = 2, 52 R_EBX = 3, 53 R_ESP = 4, 54 R_EBP = 5, 55 R_ESI = 6, 56 R_EDI = 7, 57 R_R8 = 8, 58 R_R9 = 9, 59 R_R10 = 10, 60 R_R11 = 11, 61 R_R12 = 12, 62 R_R13 = 13, 63 R_R14 = 14, 64 R_R15 = 15, 65 66 R_AL = 0, 67 R_CL = 1, 68 R_DL = 2, 69 R_BL = 3, 70 R_AH = 4, 71 R_CH = 5, 72 R_DH = 6, 73 R_BH = 7, 74 }; 75 76 typedef enum X86Seg { 77 R_ES = 0, 78 R_CS = 1, 79 R_SS = 2, 80 R_DS = 3, 81 R_FS = 4, 82 R_GS = 5, 83 R_LDTR = 6, 84 R_TR = 7, 85 } X86Seg; 86 87 /* segment descriptor fields */ 88 #define DESC_G_SHIFT 23 89 #define DESC_G_MASK (1 << DESC_G_SHIFT) 90 #define DESC_B_SHIFT 22 91 #define DESC_B_MASK (1 << DESC_B_SHIFT) 92 #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ 93 #define DESC_L_MASK (1 << DESC_L_SHIFT) 94 #define DESC_AVL_SHIFT 20 95 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) 96 #define DESC_P_SHIFT 15 97 #define DESC_P_MASK (1 << DESC_P_SHIFT) 98 #define DESC_DPL_SHIFT 13 99 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) 100 #define DESC_S_SHIFT 12 101 #define DESC_S_MASK (1 << DESC_S_SHIFT) 102 #define DESC_TYPE_SHIFT 8 103 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) 104 #define DESC_A_MASK (1 << 8) 105 106 #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ 107 #define DESC_C_MASK (1 << 10) /* code: conforming */ 108 #define DESC_R_MASK (1 << 9) /* code: readable */ 109 110 #define DESC_E_MASK (1 << 10) /* data: expansion direction */ 111 #define DESC_W_MASK (1 << 9) /* data: writable */ 112 113 #define DESC_TSS_BUSY_MASK (1 << 9) 114 115 /* eflags masks */ 116 #define CC_C 0x0001 117 #define CC_P 0x0004 118 #define CC_A 0x0010 119 #define CC_Z 0x0040 120 #define CC_S 0x0080 121 #define CC_O 0x0800 122 123 #define TF_SHIFT 8 124 #define IOPL_SHIFT 12 125 #define VM_SHIFT 17 126 127 #define TF_MASK 0x00000100 128 #define IF_MASK 0x00000200 129 #define DF_MASK 0x00000400 130 #define IOPL_MASK 0x00003000 131 #define NT_MASK 0x00004000 132 #define RF_MASK 0x00010000 133 #define VM_MASK 0x00020000 134 #define AC_MASK 0x00040000 135 #define VIF_MASK 0x00080000 136 #define VIP_MASK 0x00100000 137 #define ID_MASK 0x00200000 138 139 /* hidden flags - used internally by qemu to represent additional cpu 140 states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We 141 avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit 142 positions to ease oring with eflags. */ 143 /* current cpl */ 144 #define HF_CPL_SHIFT 0 145 /* true if hardware interrupts must be disabled for next instruction */ 146 #define HF_INHIBIT_IRQ_SHIFT 3 147 /* 16 or 32 segments */ 148 #define HF_CS32_SHIFT 4 149 #define HF_SS32_SHIFT 5 150 /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ 151 #define HF_ADDSEG_SHIFT 6 152 /* copy of CR0.PE (protected mode) */ 153 #define HF_PE_SHIFT 7 154 #define HF_TF_SHIFT 8 /* must be same as eflags */ 155 #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ 156 #define HF_EM_SHIFT 10 157 #define HF_TS_SHIFT 11 158 #define HF_IOPL_SHIFT 12 /* must be same as eflags */ 159 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ 160 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ 161 #define HF_RF_SHIFT 16 /* must be same as eflags */ 162 #define HF_VM_SHIFT 17 /* must be same as eflags */ 163 #define HF_AC_SHIFT 18 /* must be same as eflags */ 164 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ 165 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ 166 #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ 167 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ 168 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ 169 #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ 170 #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ 171 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ 172 #define HF_UMIP_SHIFT 27 /* CR4.UMIP */ 173 #define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */ 174 175 #define HF_CPL_MASK (3 << HF_CPL_SHIFT) 176 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) 177 #define HF_CS32_MASK (1 << HF_CS32_SHIFT) 178 #define HF_SS32_MASK (1 << HF_SS32_SHIFT) 179 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) 180 #define HF_PE_MASK (1 << HF_PE_SHIFT) 181 #define HF_TF_MASK (1 << HF_TF_SHIFT) 182 #define HF_MP_MASK (1 << HF_MP_SHIFT) 183 #define HF_EM_MASK (1 << HF_EM_SHIFT) 184 #define HF_TS_MASK (1 << HF_TS_SHIFT) 185 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) 186 #define HF_LMA_MASK (1 << HF_LMA_SHIFT) 187 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 188 #define HF_RF_MASK (1 << HF_RF_SHIFT) 189 #define HF_VM_MASK (1 << HF_VM_SHIFT) 190 #define HF_AC_MASK (1 << HF_AC_SHIFT) 191 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 192 #define HF_SVME_MASK (1 << HF_SVME_SHIFT) 193 #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) 194 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 195 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) 196 #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) 197 #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) 198 #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) 199 #define HF_UMIP_MASK (1 << HF_UMIP_SHIFT) 200 #define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT) 201 202 /* hflags2 */ 203 204 #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ 205 #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ 206 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ 207 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ 208 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ 209 #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ 210 #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ 211 #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ 212 #define HF2_VGIF_SHIFT 8 /* Can take VIRQ*/ 213 214 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) 215 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) 216 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) 217 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) 218 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) 219 #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) 220 #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) 221 #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) 222 #define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT) 223 224 #define CR0_PE_SHIFT 0 225 #define CR0_MP_SHIFT 1 226 227 #define CR0_PE_MASK (1U << 0) 228 #define CR0_MP_MASK (1U << 1) 229 #define CR0_EM_MASK (1U << 2) 230 #define CR0_TS_MASK (1U << 3) 231 #define CR0_ET_MASK (1U << 4) 232 #define CR0_NE_MASK (1U << 5) 233 #define CR0_WP_MASK (1U << 16) 234 #define CR0_AM_MASK (1U << 18) 235 #define CR0_NW_MASK (1U << 29) 236 #define CR0_CD_MASK (1U << 30) 237 #define CR0_PG_MASK (1U << 31) 238 239 #define CR4_VME_MASK (1U << 0) 240 #define CR4_PVI_MASK (1U << 1) 241 #define CR4_TSD_MASK (1U << 2) 242 #define CR4_DE_MASK (1U << 3) 243 #define CR4_PSE_MASK (1U << 4) 244 #define CR4_PAE_MASK (1U << 5) 245 #define CR4_MCE_MASK (1U << 6) 246 #define CR4_PGE_MASK (1U << 7) 247 #define CR4_PCE_MASK (1U << 8) 248 #define CR4_OSFXSR_SHIFT 9 249 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) 250 #define CR4_OSXMMEXCPT_MASK (1U << 10) 251 #define CR4_UMIP_MASK (1U << 11) 252 #define CR4_LA57_MASK (1U << 12) 253 #define CR4_VMXE_MASK (1U << 13) 254 #define CR4_SMXE_MASK (1U << 14) 255 #define CR4_FSGSBASE_MASK (1U << 16) 256 #define CR4_PCIDE_MASK (1U << 17) 257 #define CR4_OSXSAVE_MASK (1U << 18) 258 #define CR4_SMEP_MASK (1U << 20) 259 #define CR4_SMAP_MASK (1U << 21) 260 #define CR4_PKE_MASK (1U << 22) 261 #define CR4_PKS_MASK (1U << 24) 262 #define CR4_LAM_SUP_MASK (1U << 28) 263 264 #ifdef TARGET_X86_64 265 #define CR4_FRED_MASK (1ULL << 32) 266 #else 267 #define CR4_FRED_MASK 0 268 #endif 269 270 #ifdef TARGET_X86_64 271 #define CR4_FRED_MASK (1ULL << 32) 272 #else 273 #define CR4_FRED_MASK 0 274 #endif 275 276 #define CR4_RESERVED_MASK \ 277 (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ 278 | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ 279 | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \ 280 | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \ 281 | CR4_LA57_MASK \ 282 | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \ 283 | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK \ 284 | CR4_LAM_SUP_MASK | CR4_FRED_MASK)) 285 286 #define DR6_BD (1 << 13) 287 #define DR6_BS (1 << 14) 288 #define DR6_BT (1 << 15) 289 #define DR6_FIXED_1 0xffff0ff0 290 291 #define DR7_GD (1 << 13) 292 #define DR7_TYPE_SHIFT 16 293 #define DR7_LEN_SHIFT 18 294 #define DR7_FIXED_1 0x00000400 295 #define DR7_GLOBAL_BP_MASK 0xaa 296 #define DR7_LOCAL_BP_MASK 0x55 297 #define DR7_MAX_BP 4 298 #define DR7_TYPE_BP_INST 0x0 299 #define DR7_TYPE_DATA_WR 0x1 300 #define DR7_TYPE_IO_RW 0x2 301 #define DR7_TYPE_DATA_RW 0x3 302 303 #define DR_RESERVED_MASK 0xffffffff00000000ULL 304 305 #define PG_PRESENT_BIT 0 306 #define PG_RW_BIT 1 307 #define PG_USER_BIT 2 308 #define PG_PWT_BIT 3 309 #define PG_PCD_BIT 4 310 #define PG_ACCESSED_BIT 5 311 #define PG_DIRTY_BIT 6 312 #define PG_PSE_BIT 7 313 #define PG_GLOBAL_BIT 8 314 #define PG_PSE_PAT_BIT 12 315 #define PG_PKRU_BIT 59 316 #define PG_NX_BIT 63 317 318 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) 319 #define PG_RW_MASK (1 << PG_RW_BIT) 320 #define PG_USER_MASK (1 << PG_USER_BIT) 321 #define PG_PWT_MASK (1 << PG_PWT_BIT) 322 #define PG_PCD_MASK (1 << PG_PCD_BIT) 323 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) 324 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) 325 #define PG_PSE_MASK (1 << PG_PSE_BIT) 326 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) 327 #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) 328 #define PG_ADDRESS_MASK 0x000ffffffffff000LL 329 #define PG_HI_USER_MASK 0x7ff0000000000000LL 330 #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) 331 #define PG_NX_MASK (1ULL << PG_NX_BIT) 332 333 #define PG_ERROR_W_BIT 1 334 335 #define PG_ERROR_P_MASK 0x01 336 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) 337 #define PG_ERROR_U_MASK 0x04 338 #define PG_ERROR_RSVD_MASK 0x08 339 #define PG_ERROR_I_D_MASK 0x10 340 #define PG_ERROR_PK_MASK 0x20 341 342 #define PG_MODE_PAE (1 << 0) 343 #define PG_MODE_LMA (1 << 1) 344 #define PG_MODE_NXE (1 << 2) 345 #define PG_MODE_PSE (1 << 3) 346 #define PG_MODE_LA57 (1 << 4) 347 #define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15) 348 349 /* Bits of CR4 that do not affect the NPT page format. */ 350 #define PG_MODE_WP (1 << 16) 351 #define PG_MODE_PKE (1 << 17) 352 #define PG_MODE_PKS (1 << 18) 353 #define PG_MODE_SMEP (1 << 19) 354 355 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ 356 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ 357 #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ 358 359 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) 360 #define MCE_BANKS_DEF 10 361 362 #define MCG_CAP_BANKS_MASK 0xff 363 364 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ 365 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ 366 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 367 #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ 368 369 #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ 370 371 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 372 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ 373 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ 374 #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ 375 #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ 376 #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ 377 #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ 378 #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 379 #define MCI_STATUS_AR (1ULL<<55) /* Action required */ 380 #define MCI_STATUS_DEFERRED (1ULL<<44) /* Deferred error */ 381 #define MCI_STATUS_POISON (1ULL<<43) /* Poisoned data consumed */ 382 383 /* MISC register defines */ 384 #define MCM_ADDR_SEGOFF 0 /* segment offset */ 385 #define MCM_ADDR_LINEAR 1 /* linear address */ 386 #define MCM_ADDR_PHYS 2 /* physical address */ 387 #define MCM_ADDR_MEM 3 /* memory address */ 388 #define MCM_ADDR_GENERIC 7 /* generic */ 389 390 #define MSR_IA32_TSC 0x10 391 #define MSR_IA32_APICBASE 0x1b 392 #define MSR_IA32_APICBASE_BSP (1<<8) 393 #define MSR_IA32_APICBASE_ENABLE (1<<11) 394 #define MSR_IA32_APICBASE_EXTD (1 << 10) 395 #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) 396 #define MSR_IA32_APICBASE_RESERVED \ 397 (~(uint64_t)(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE \ 398 | MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_BASE)) 399 400 #define MSR_IA32_FEATURE_CONTROL 0x0000003a 401 #define MSR_TSC_ADJUST 0x0000003b 402 #define MSR_IA32_SPEC_CTRL 0x48 403 #define MSR_VIRT_SSBD 0xc001011f 404 #define MSR_IA32_PRED_CMD 0x49 405 #define MSR_IA32_UCODE_REV 0x8b 406 #define MSR_IA32_CORE_CAPABILITY 0xcf 407 408 #define MSR_IA32_ARCH_CAPABILITIES 0x10a 409 #define ARCH_CAP_TSX_CTRL_MSR (1<<7) 410 411 #define MSR_IA32_PERF_CAPABILITIES 0x345 412 #define PERF_CAP_LBR_FMT 0x3f 413 414 #define MSR_IA32_TSX_CTRL 0x122 415 #define MSR_IA32_TSCDEADLINE 0x6e0 416 #define MSR_IA32_PKRS 0x6e1 417 #define MSR_ARCH_LBR_CTL 0x000014ce 418 #define MSR_ARCH_LBR_DEPTH 0x000014cf 419 #define MSR_ARCH_LBR_FROM_0 0x00001500 420 #define MSR_ARCH_LBR_TO_0 0x00001600 421 #define MSR_ARCH_LBR_INFO_0 0x00001200 422 423 #define FEATURE_CONTROL_LOCKED (1<<0) 424 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1) 425 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) 426 #define FEATURE_CONTROL_SGX_LC (1ULL << 17) 427 #define FEATURE_CONTROL_SGX (1ULL << 18) 428 #define FEATURE_CONTROL_LMCE (1<<20) 429 430 #define MSR_IA32_SGXLEPUBKEYHASH0 0x8c 431 #define MSR_IA32_SGXLEPUBKEYHASH1 0x8d 432 #define MSR_IA32_SGXLEPUBKEYHASH2 0x8e 433 #define MSR_IA32_SGXLEPUBKEYHASH3 0x8f 434 435 #define MSR_P6_PERFCTR0 0xc1 436 437 #define MSR_IA32_SMBASE 0x9e 438 #define MSR_SMI_COUNT 0x34 439 #define MSR_CORE_THREAD_COUNT 0x35 440 #define MSR_MTRRcap 0xfe 441 #define MSR_MTRRcap_VCNT 8 442 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) 443 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) 444 445 #define MSR_IA32_SYSENTER_CS 0x174 446 #define MSR_IA32_SYSENTER_ESP 0x175 447 #define MSR_IA32_SYSENTER_EIP 0x176 448 449 #define MSR_MCG_CAP 0x179 450 #define MSR_MCG_STATUS 0x17a 451 #define MSR_MCG_CTL 0x17b 452 #define MSR_MCG_EXT_CTL 0x4d0 453 454 #define MSR_P6_EVNTSEL0 0x186 455 456 #define MSR_IA32_PERF_STATUS 0x198 457 458 #define MSR_IA32_MISC_ENABLE 0x1a0 459 /* Indicates good rep/movs microcode on some processors: */ 460 #define MSR_IA32_MISC_ENABLE_DEFAULT 1 461 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) 462 463 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) 464 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) 465 466 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) 467 468 #define MSR_MTRRfix64K_00000 0x250 469 #define MSR_MTRRfix16K_80000 0x258 470 #define MSR_MTRRfix16K_A0000 0x259 471 #define MSR_MTRRfix4K_C0000 0x268 472 #define MSR_MTRRfix4K_C8000 0x269 473 #define MSR_MTRRfix4K_D0000 0x26a 474 #define MSR_MTRRfix4K_D8000 0x26b 475 #define MSR_MTRRfix4K_E0000 0x26c 476 #define MSR_MTRRfix4K_E8000 0x26d 477 #define MSR_MTRRfix4K_F0000 0x26e 478 #define MSR_MTRRfix4K_F8000 0x26f 479 480 #define MSR_PAT 0x277 481 482 #define MSR_MTRRdefType 0x2ff 483 484 #define MSR_CORE_PERF_FIXED_CTR0 0x309 485 #define MSR_CORE_PERF_FIXED_CTR1 0x30a 486 #define MSR_CORE_PERF_FIXED_CTR2 0x30b 487 #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d 488 #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e 489 #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f 490 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 491 492 #define MSR_MC0_CTL 0x400 493 #define MSR_MC0_STATUS 0x401 494 #define MSR_MC0_ADDR 0x402 495 #define MSR_MC0_MISC 0x403 496 497 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560 498 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561 499 #define MSR_IA32_RTIT_CTL 0x570 500 #define MSR_IA32_RTIT_STATUS 0x571 501 #define MSR_IA32_RTIT_CR3_MATCH 0x572 502 #define MSR_IA32_RTIT_ADDR0_A 0x580 503 #define MSR_IA32_RTIT_ADDR0_B 0x581 504 #define MSR_IA32_RTIT_ADDR1_A 0x582 505 #define MSR_IA32_RTIT_ADDR1_B 0x583 506 #define MSR_IA32_RTIT_ADDR2_A 0x584 507 #define MSR_IA32_RTIT_ADDR2_B 0x585 508 #define MSR_IA32_RTIT_ADDR3_A 0x586 509 #define MSR_IA32_RTIT_ADDR3_B 0x587 510 #define MAX_RTIT_ADDRS 8 511 512 #define MSR_EFER 0xc0000080 513 514 #define MSR_EFER_SCE (1 << 0) 515 #define MSR_EFER_LME (1 << 8) 516 #define MSR_EFER_LMA (1 << 10) 517 #define MSR_EFER_NXE (1 << 11) 518 #define MSR_EFER_SVME (1 << 12) 519 #define MSR_EFER_FFXSR (1 << 14) 520 521 #define MSR_EFER_RESERVED\ 522 (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\ 523 | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\ 524 | MSR_EFER_FFXSR)) 525 526 #define MSR_STAR 0xc0000081 527 #define MSR_LSTAR 0xc0000082 528 #define MSR_CSTAR 0xc0000083 529 #define MSR_FMASK 0xc0000084 530 #define MSR_FSBASE 0xc0000100 531 #define MSR_GSBASE 0xc0000101 532 #define MSR_KERNELGSBASE 0xc0000102 533 #define MSR_TSC_AUX 0xc0000103 534 #define MSR_AMD64_TSC_RATIO 0xc0000104 535 536 #define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL 537 538 #define MSR_VM_HSAVE_PA 0xc0010117 539 540 #define MSR_IA32_XFD 0x000001c4 541 #define MSR_IA32_XFD_ERR 0x000001c5 542 543 /* FRED MSRs */ 544 #define MSR_IA32_FRED_RSP0 0x000001cc /* Stack level 0 regular stack pointer */ 545 #define MSR_IA32_FRED_RSP1 0x000001cd /* Stack level 1 regular stack pointer */ 546 #define MSR_IA32_FRED_RSP2 0x000001ce /* Stack level 2 regular stack pointer */ 547 #define MSR_IA32_FRED_RSP3 0x000001cf /* Stack level 3 regular stack pointer */ 548 #define MSR_IA32_FRED_STKLVLS 0x000001d0 /* FRED exception stack levels */ 549 #define MSR_IA32_FRED_SSP1 0x000001d1 /* Stack level 1 shadow stack pointer in ring 0 */ 550 #define MSR_IA32_FRED_SSP2 0x000001d2 /* Stack level 2 shadow stack pointer in ring 0 */ 551 #define MSR_IA32_FRED_SSP3 0x000001d3 /* Stack level 3 shadow stack pointer in ring 0 */ 552 #define MSR_IA32_FRED_CONFIG 0x000001d4 /* FRED Entrypoint and interrupt stack level */ 553 554 #define MSR_IA32_BNDCFGS 0x00000d90 555 #define MSR_IA32_XSS 0x00000da0 556 #define MSR_IA32_UMWAIT_CONTROL 0xe1 557 558 #define MSR_IA32_VMX_BASIC 0x00000480 559 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 560 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 561 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 562 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 563 #define MSR_IA32_VMX_MISC 0x00000485 564 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 565 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 566 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 567 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 568 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a 569 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b 570 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c 571 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d 572 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e 573 #define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f 574 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 575 #define MSR_IA32_VMX_VMFUNC 0x00000491 576 577 #define MSR_APIC_START 0x00000800 578 #define MSR_APIC_END 0x000008ff 579 580 #define XSTATE_FP_BIT 0 581 #define XSTATE_SSE_BIT 1 582 #define XSTATE_YMM_BIT 2 583 #define XSTATE_BNDREGS_BIT 3 584 #define XSTATE_BNDCSR_BIT 4 585 #define XSTATE_OPMASK_BIT 5 586 #define XSTATE_ZMM_Hi256_BIT 6 587 #define XSTATE_Hi16_ZMM_BIT 7 588 #define XSTATE_PKRU_BIT 9 589 #define XSTATE_ARCH_LBR_BIT 15 590 #define XSTATE_XTILE_CFG_BIT 17 591 #define XSTATE_XTILE_DATA_BIT 18 592 593 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) 594 #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) 595 #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) 596 #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) 597 #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) 598 #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) 599 #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) 600 #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) 601 #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) 602 #define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT) 603 #define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) 604 #define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT) 605 606 #define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK) 607 608 #define ESA_FEATURE_ALIGN64_BIT 1 609 #define ESA_FEATURE_XFD_BIT 2 610 611 #define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT) 612 #define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT) 613 614 615 /* CPUID feature bits available in XCR0 */ 616 #define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \ 617 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \ 618 XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \ 619 XSTATE_ZMM_Hi256_MASK | \ 620 XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \ 621 XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK) 622 623 /* CPUID feature words */ 624 typedef enum FeatureWord { 625 FEAT_1_EDX, /* CPUID[1].EDX */ 626 FEAT_1_ECX, /* CPUID[1].ECX */ 627 FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ 628 FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ 629 FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ 630 FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ 631 FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ 632 FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ 633 FEAT_8000_0007_EBX, /* CPUID[8000_0007].EBX */ 634 FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ 635 FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ 636 FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */ 637 FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ 638 FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ 639 FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ 640 FEAT_SVM, /* CPUID[8000_000A].EDX */ 641 FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ 642 FEAT_6_EAX, /* CPUID[6].EAX */ 643 FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ 644 FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ 645 FEAT_ARCH_CAPABILITIES, 646 FEAT_CORE_CAPABILITY, 647 FEAT_PERF_CAPABILITIES, 648 FEAT_VMX_PROCBASED_CTLS, 649 FEAT_VMX_SECONDARY_CTLS, 650 FEAT_VMX_PINBASED_CTLS, 651 FEAT_VMX_EXIT_CTLS, 652 FEAT_VMX_ENTRY_CTLS, 653 FEAT_VMX_MISC, 654 FEAT_VMX_EPT_VPID_CAPS, 655 FEAT_VMX_BASIC, 656 FEAT_VMX_VMFUNC, 657 FEAT_14_0_ECX, 658 FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */ 659 FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */ 660 FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ 661 FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */ 662 FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */ 663 FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */ 664 FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */ 665 FEATURE_WORDS, 666 } FeatureWord; 667 668 typedef uint64_t FeatureWordArray[FEATURE_WORDS]; 669 uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 670 bool migratable_only); 671 672 /* cpuid_features bits */ 673 #define CPUID_FP87 (1U << 0) 674 #define CPUID_VME (1U << 1) 675 #define CPUID_DE (1U << 2) 676 #define CPUID_PSE (1U << 3) 677 #define CPUID_TSC (1U << 4) 678 #define CPUID_MSR (1U << 5) 679 #define CPUID_PAE (1U << 6) 680 #define CPUID_MCE (1U << 7) 681 #define CPUID_CX8 (1U << 8) 682 #define CPUID_APIC (1U << 9) 683 #define CPUID_SEP (1U << 11) /* sysenter/sysexit */ 684 #define CPUID_MTRR (1U << 12) 685 #define CPUID_PGE (1U << 13) 686 #define CPUID_MCA (1U << 14) 687 #define CPUID_CMOV (1U << 15) 688 #define CPUID_PAT (1U << 16) 689 #define CPUID_PSE36 (1U << 17) 690 #define CPUID_PN (1U << 18) 691 #define CPUID_CLFLUSH (1U << 19) 692 #define CPUID_DTS (1U << 21) 693 #define CPUID_ACPI (1U << 22) 694 #define CPUID_MMX (1U << 23) 695 #define CPUID_FXSR (1U << 24) 696 #define CPUID_SSE (1U << 25) 697 #define CPUID_SSE2 (1U << 26) 698 #define CPUID_SS (1U << 27) 699 #define CPUID_HT (1U << 28) 700 #define CPUID_TM (1U << 29) 701 #define CPUID_IA64 (1U << 30) 702 #define CPUID_PBE (1U << 31) 703 704 #define CPUID_EXT_SSE3 (1U << 0) 705 #define CPUID_EXT_PCLMULQDQ (1U << 1) 706 #define CPUID_EXT_DTES64 (1U << 2) 707 #define CPUID_EXT_MONITOR (1U << 3) 708 #define CPUID_EXT_DSCPL (1U << 4) 709 #define CPUID_EXT_VMX (1U << 5) 710 #define CPUID_EXT_SMX (1U << 6) 711 #define CPUID_EXT_EST (1U << 7) 712 #define CPUID_EXT_TM2 (1U << 8) 713 #define CPUID_EXT_SSSE3 (1U << 9) 714 #define CPUID_EXT_CID (1U << 10) 715 #define CPUID_EXT_FMA (1U << 12) 716 #define CPUID_EXT_CX16 (1U << 13) 717 #define CPUID_EXT_XTPR (1U << 14) 718 #define CPUID_EXT_PDCM (1U << 15) 719 #define CPUID_EXT_PCID (1U << 17) 720 #define CPUID_EXT_DCA (1U << 18) 721 #define CPUID_EXT_SSE41 (1U << 19) 722 #define CPUID_EXT_SSE42 (1U << 20) 723 #define CPUID_EXT_X2APIC (1U << 21) 724 #define CPUID_EXT_MOVBE (1U << 22) 725 #define CPUID_EXT_POPCNT (1U << 23) 726 #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) 727 #define CPUID_EXT_AES (1U << 25) 728 #define CPUID_EXT_XSAVE (1U << 26) 729 #define CPUID_EXT_OSXSAVE (1U << 27) 730 #define CPUID_EXT_AVX (1U << 28) 731 #define CPUID_EXT_F16C (1U << 29) 732 #define CPUID_EXT_RDRAND (1U << 30) 733 #define CPUID_EXT_HYPERVISOR (1U << 31) 734 735 #define CPUID_EXT2_FPU (1U << 0) 736 #define CPUID_EXT2_VME (1U << 1) 737 #define CPUID_EXT2_DE (1U << 2) 738 #define CPUID_EXT2_PSE (1U << 3) 739 #define CPUID_EXT2_TSC (1U << 4) 740 #define CPUID_EXT2_MSR (1U << 5) 741 #define CPUID_EXT2_PAE (1U << 6) 742 #define CPUID_EXT2_MCE (1U << 7) 743 #define CPUID_EXT2_CX8 (1U << 8) 744 #define CPUID_EXT2_APIC (1U << 9) 745 #define CPUID_EXT2_SYSCALL (1U << 11) 746 #define CPUID_EXT2_MTRR (1U << 12) 747 #define CPUID_EXT2_PGE (1U << 13) 748 #define CPUID_EXT2_MCA (1U << 14) 749 #define CPUID_EXT2_CMOV (1U << 15) 750 #define CPUID_EXT2_PAT (1U << 16) 751 #define CPUID_EXT2_PSE36 (1U << 17) 752 #define CPUID_EXT2_MP (1U << 19) 753 #define CPUID_EXT2_NX (1U << 20) 754 #define CPUID_EXT2_MMXEXT (1U << 22) 755 #define CPUID_EXT2_MMX (1U << 23) 756 #define CPUID_EXT2_FXSR (1U << 24) 757 #define CPUID_EXT2_FFXSR (1U << 25) 758 #define CPUID_EXT2_PDPE1GB (1U << 26) 759 #define CPUID_EXT2_RDTSCP (1U << 27) 760 #define CPUID_EXT2_LM (1U << 29) 761 #define CPUID_EXT2_3DNOWEXT (1U << 30) 762 #define CPUID_EXT2_3DNOW (1U << 31) 763 764 /* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */ 765 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ 766 CPUID_EXT2_DE | CPUID_EXT2_PSE | \ 767 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ 768 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ 769 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ 770 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ 771 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ 772 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ 773 CPUID_EXT2_MMX | CPUID_EXT2_FXSR) 774 775 #define CPUID_EXT3_LAHF_LM (1U << 0) 776 #define CPUID_EXT3_CMP_LEG (1U << 1) 777 #define CPUID_EXT3_SVM (1U << 2) 778 #define CPUID_EXT3_EXTAPIC (1U << 3) 779 #define CPUID_EXT3_CR8LEG (1U << 4) 780 #define CPUID_EXT3_ABM (1U << 5) 781 #define CPUID_EXT3_SSE4A (1U << 6) 782 #define CPUID_EXT3_MISALIGNSSE (1U << 7) 783 #define CPUID_EXT3_3DNOWPREFETCH (1U << 8) 784 #define CPUID_EXT3_OSVW (1U << 9) 785 #define CPUID_EXT3_IBS (1U << 10) 786 #define CPUID_EXT3_XOP (1U << 11) 787 #define CPUID_EXT3_SKINIT (1U << 12) 788 #define CPUID_EXT3_WDT (1U << 13) 789 #define CPUID_EXT3_LWP (1U << 15) 790 #define CPUID_EXT3_FMA4 (1U << 16) 791 #define CPUID_EXT3_TCE (1U << 17) 792 #define CPUID_EXT3_NODEID (1U << 19) 793 #define CPUID_EXT3_TBM (1U << 21) 794 #define CPUID_EXT3_TOPOEXT (1U << 22) 795 #define CPUID_EXT3_PERFCORE (1U << 23) 796 #define CPUID_EXT3_PERFNB (1U << 24) 797 798 #define CPUID_SVM_NPT (1U << 0) 799 #define CPUID_SVM_LBRV (1U << 1) 800 #define CPUID_SVM_SVMLOCK (1U << 2) 801 #define CPUID_SVM_NRIPSAVE (1U << 3) 802 #define CPUID_SVM_TSCSCALE (1U << 4) 803 #define CPUID_SVM_VMCBCLEAN (1U << 5) 804 #define CPUID_SVM_FLUSHASID (1U << 6) 805 #define CPUID_SVM_DECODEASSIST (1U << 7) 806 #define CPUID_SVM_PAUSEFILTER (1U << 10) 807 #define CPUID_SVM_PFTHRESHOLD (1U << 12) 808 #define CPUID_SVM_AVIC (1U << 13) 809 #define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15) 810 #define CPUID_SVM_VGIF (1U << 16) 811 #define CPUID_SVM_VNMI (1U << 25) 812 #define CPUID_SVM_SVME_ADDR_CHK (1U << 28) 813 814 /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ 815 #define CPUID_7_0_EBX_FSGSBASE (1U << 0) 816 /* Support SGX */ 817 #define CPUID_7_0_EBX_SGX (1U << 2) 818 /* 1st Group of Advanced Bit Manipulation Extensions */ 819 #define CPUID_7_0_EBX_BMI1 (1U << 3) 820 /* Hardware Lock Elision */ 821 #define CPUID_7_0_EBX_HLE (1U << 4) 822 /* Intel Advanced Vector Extensions 2 */ 823 #define CPUID_7_0_EBX_AVX2 (1U << 5) 824 /* Supervisor-mode Execution Prevention */ 825 #define CPUID_7_0_EBX_SMEP (1U << 7) 826 /* 2nd Group of Advanced Bit Manipulation Extensions */ 827 #define CPUID_7_0_EBX_BMI2 (1U << 8) 828 /* Enhanced REP MOVSB/STOSB */ 829 #define CPUID_7_0_EBX_ERMS (1U << 9) 830 /* Invalidate Process-Context Identifier */ 831 #define CPUID_7_0_EBX_INVPCID (1U << 10) 832 /* Restricted Transactional Memory */ 833 #define CPUID_7_0_EBX_RTM (1U << 11) 834 /* Memory Protection Extension */ 835 #define CPUID_7_0_EBX_MPX (1U << 14) 836 /* AVX-512 Foundation */ 837 #define CPUID_7_0_EBX_AVX512F (1U << 16) 838 /* AVX-512 Doubleword & Quadword Instruction */ 839 #define CPUID_7_0_EBX_AVX512DQ (1U << 17) 840 /* Read Random SEED */ 841 #define CPUID_7_0_EBX_RDSEED (1U << 18) 842 /* ADCX and ADOX instructions */ 843 #define CPUID_7_0_EBX_ADX (1U << 19) 844 /* Supervisor Mode Access Prevention */ 845 #define CPUID_7_0_EBX_SMAP (1U << 20) 846 /* AVX-512 Integer Fused Multiply Add */ 847 #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) 848 /* Flush a Cache Line Optimized */ 849 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) 850 /* Cache Line Write Back */ 851 #define CPUID_7_0_EBX_CLWB (1U << 24) 852 /* Intel Processor Trace */ 853 #define CPUID_7_0_EBX_INTEL_PT (1U << 25) 854 /* AVX-512 Prefetch */ 855 #define CPUID_7_0_EBX_AVX512PF (1U << 26) 856 /* AVX-512 Exponential and Reciprocal */ 857 #define CPUID_7_0_EBX_AVX512ER (1U << 27) 858 /* AVX-512 Conflict Detection */ 859 #define CPUID_7_0_EBX_AVX512CD (1U << 28) 860 /* SHA1/SHA256 Instruction Extensions */ 861 #define CPUID_7_0_EBX_SHA_NI (1U << 29) 862 /* AVX-512 Byte and Word Instructions */ 863 #define CPUID_7_0_EBX_AVX512BW (1U << 30) 864 /* AVX-512 Vector Length Extensions */ 865 #define CPUID_7_0_EBX_AVX512VL (1U << 31) 866 867 /* AVX-512 Vector Byte Manipulation Instruction */ 868 #define CPUID_7_0_ECX_AVX512_VBMI (1U << 1) 869 /* User-Mode Instruction Prevention */ 870 #define CPUID_7_0_ECX_UMIP (1U << 2) 871 /* Protection Keys for User-mode Pages */ 872 #define CPUID_7_0_ECX_PKU (1U << 3) 873 /* OS Enable Protection Keys */ 874 #define CPUID_7_0_ECX_OSPKE (1U << 4) 875 /* UMONITOR/UMWAIT/TPAUSE Instructions */ 876 #define CPUID_7_0_ECX_WAITPKG (1U << 5) 877 /* Additional AVX-512 Vector Byte Manipulation Instruction */ 878 #define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6) 879 /* Galois Field New Instructions */ 880 #define CPUID_7_0_ECX_GFNI (1U << 8) 881 /* Vector AES Instructions */ 882 #define CPUID_7_0_ECX_VAES (1U << 9) 883 /* Carry-Less Multiplication Quadword */ 884 #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) 885 /* Vector Neural Network Instructions */ 886 #define CPUID_7_0_ECX_AVX512VNNI (1U << 11) 887 /* Support for VPOPCNT[B,W] and VPSHUFBITQMB */ 888 #define CPUID_7_0_ECX_AVX512BITALG (1U << 12) 889 /* POPCNT for vectors of DW/QW */ 890 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) 891 /* 5-level Page Tables */ 892 #define CPUID_7_0_ECX_LA57 (1U << 16) 893 /* Read Processor ID */ 894 #define CPUID_7_0_ECX_RDPID (1U << 22) 895 /* Bus Lock Debug Exception */ 896 #define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24) 897 /* Cache Line Demote Instruction */ 898 #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) 899 /* Move Doubleword as Direct Store Instruction */ 900 #define CPUID_7_0_ECX_MOVDIRI (1U << 27) 901 /* Move 64 Bytes as Direct Store Instruction */ 902 #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) 903 /* Support SGX Launch Control */ 904 #define CPUID_7_0_ECX_SGX_LC (1U << 30) 905 /* Protection Keys for Supervisor-mode Pages */ 906 #define CPUID_7_0_ECX_PKS (1U << 31) 907 908 /* AVX512 Neural Network Instructions */ 909 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) 910 /* AVX512 Multiply Accumulation Single Precision */ 911 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) 912 /* Fast Short Rep Mov */ 913 #define CPUID_7_0_EDX_FSRM (1U << 4) 914 /* AVX512 Vector Pair Intersection to a Pair of Mask Registers */ 915 #define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8) 916 /* SERIALIZE instruction */ 917 #define CPUID_7_0_EDX_SERIALIZE (1U << 14) 918 /* TSX Suspend Load Address Tracking instruction */ 919 #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) 920 /* Architectural LBRs */ 921 #define CPUID_7_0_EDX_ARCH_LBR (1U << 19) 922 /* AMX_BF16 instruction */ 923 #define CPUID_7_0_EDX_AMX_BF16 (1U << 22) 924 /* AVX512_FP16 instruction */ 925 #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) 926 /* AMX tile (two-dimensional register) */ 927 #define CPUID_7_0_EDX_AMX_TILE (1U << 24) 928 /* AMX_INT8 instruction */ 929 #define CPUID_7_0_EDX_AMX_INT8 (1U << 25) 930 /* Speculation Control */ 931 #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) 932 /* Single Thread Indirect Branch Predictors */ 933 #define CPUID_7_0_EDX_STIBP (1U << 27) 934 /* Flush L1D cache */ 935 #define CPUID_7_0_EDX_FLUSH_L1D (1U << 28) 936 /* Arch Capabilities */ 937 #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) 938 /* Core Capability */ 939 #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) 940 /* Speculative Store Bypass Disable */ 941 #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) 942 943 /* AVX VNNI Instruction */ 944 #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) 945 /* AVX512 BFloat16 Instruction */ 946 #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) 947 /* CMPCCXADD Instructions */ 948 #define CPUID_7_1_EAX_CMPCCXADD (1U << 7) 949 /* Fast Zero REP MOVS */ 950 #define CPUID_7_1_EAX_FZRM (1U << 10) 951 /* Fast Short REP STOS */ 952 #define CPUID_7_1_EAX_FSRS (1U << 11) 953 /* Fast Short REP CMPS/SCAS */ 954 #define CPUID_7_1_EAX_FSRC (1U << 12) 955 /* Support Tile Computational Operations on FP16 Numbers */ 956 #define CPUID_7_1_EAX_AMX_FP16 (1U << 21) 957 /* Support for VPMADD52[H,L]UQ */ 958 #define CPUID_7_1_EAX_AVX_IFMA (1U << 23) 959 /* Linear Address Masking */ 960 #define CPUID_7_1_EAX_LAM (1U << 26) 961 962 /* Support for VPDPB[SU,UU,SS]D[,S] */ 963 #define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4) 964 /* AVX NE CONVERT Instructions */ 965 #define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5) 966 /* AMX COMPLEX Instructions */ 967 #define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8) 968 /* PREFETCHIT0/1 Instructions */ 969 #define CPUID_7_1_EDX_PREFETCHITI (1U << 14) 970 /* Flexible return and event delivery (FRED) */ 971 #define CPUID_7_1_EAX_FRED (1U << 17) 972 /* Load into IA32_KERNEL_GS_BASE (LKGS) */ 973 #define CPUID_7_1_EAX_LKGS (1U << 18) 974 /* Non-Serializing Write to Model Specific Register (WRMSRNS) */ 975 #define CPUID_7_1_EAX_WRMSRNS (1U << 19) 976 977 /* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */ 978 #define CPUID_7_2_EDX_MCDT_NO (1U << 5) 979 980 /* XFD Extend Feature Disabled */ 981 #define CPUID_D_1_EAX_XFD (1U << 4) 982 983 /* Packets which contain IP payload have LIP values */ 984 #define CPUID_14_0_ECX_LIP (1U << 31) 985 986 /* RAS Features */ 987 #define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0) 988 #define CPUID_8000_0007_EBX_SUCCOR (1U << 1) 989 990 /* CLZERO instruction */ 991 #define CPUID_8000_0008_EBX_CLZERO (1U << 0) 992 /* Always save/restore FP error pointers */ 993 #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2) 994 /* Write back and do not invalidate cache */ 995 #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) 996 /* Indirect Branch Prediction Barrier */ 997 #define CPUID_8000_0008_EBX_IBPB (1U << 12) 998 /* Indirect Branch Restricted Speculation */ 999 #define CPUID_8000_0008_EBX_IBRS (1U << 14) 1000 /* Single Thread Indirect Branch Predictors */ 1001 #define CPUID_8000_0008_EBX_STIBP (1U << 15) 1002 /* STIBP mode has enhanced performance and may be left always on */ 1003 #define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17) 1004 /* Speculative Store Bypass Disable */ 1005 #define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24) 1006 /* Predictive Store Forwarding Disable */ 1007 #define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28) 1008 1009 /* Processor ignores nested data breakpoints */ 1010 #define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0) 1011 /* LFENCE is always serializing */ 1012 #define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2) 1013 /* Null Selector Clears Base */ 1014 #define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) 1015 /* Automatic IBRS */ 1016 #define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8) 1017 1018 #define CPUID_XSAVE_XSAVEOPT (1U << 0) 1019 #define CPUID_XSAVE_XSAVEC (1U << 1) 1020 #define CPUID_XSAVE_XGETBV1 (1U << 2) 1021 #define CPUID_XSAVE_XSAVES (1U << 3) 1022 1023 #define CPUID_6_EAX_ARAT (1U << 2) 1024 1025 /* CPUID[0x80000007].EDX flags: */ 1026 #define CPUID_APM_INVTSC (1U << 8) 1027 1028 #define CPUID_VENDOR_SZ 12 1029 1030 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ 1031 #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ 1032 #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ 1033 #define CPUID_VENDOR_INTEL "GenuineIntel" 1034 1035 #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ 1036 #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ 1037 #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ 1038 #define CPUID_VENDOR_AMD "AuthenticAMD" 1039 1040 #define CPUID_VENDOR_VIA "CentaurHauls" 1041 1042 #define CPUID_VENDOR_HYGON "HygonGenuine" 1043 1044 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ 1045 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ 1046 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) 1047 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ 1048 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ 1049 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) 1050 1051 #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ 1052 #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ 1053 1054 /* CPUID[0xB].ECX level types */ 1055 #define CPUID_B_ECX_TOPO_LEVEL_INVALID 0 1056 #define CPUID_B_ECX_TOPO_LEVEL_SMT 1 1057 #define CPUID_B_ECX_TOPO_LEVEL_CORE 2 1058 1059 /* COUID[0x1F].ECX level types */ 1060 #define CPUID_1F_ECX_TOPO_LEVEL_INVALID CPUID_B_ECX_TOPO_LEVEL_INVALID 1061 #define CPUID_1F_ECX_TOPO_LEVEL_SMT CPUID_B_ECX_TOPO_LEVEL_SMT 1062 #define CPUID_1F_ECX_TOPO_LEVEL_CORE CPUID_B_ECX_TOPO_LEVEL_CORE 1063 #define CPUID_1F_ECX_TOPO_LEVEL_MODULE 3 1064 #define CPUID_1F_ECX_TOPO_LEVEL_DIE 5 1065 1066 /* MSR Feature Bits */ 1067 #define MSR_ARCH_CAP_RDCL_NO (1U << 0) 1068 #define MSR_ARCH_CAP_IBRS_ALL (1U << 1) 1069 #define MSR_ARCH_CAP_RSBA (1U << 2) 1070 #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) 1071 #define MSR_ARCH_CAP_SSB_NO (1U << 4) 1072 #define MSR_ARCH_CAP_MDS_NO (1U << 5) 1073 #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) 1074 #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) 1075 #define MSR_ARCH_CAP_TAA_NO (1U << 8) 1076 #define MSR_ARCH_CAP_SBDR_SSDP_NO (1U << 13) 1077 #define MSR_ARCH_CAP_FBSDP_NO (1U << 14) 1078 #define MSR_ARCH_CAP_PSDP_NO (1U << 15) 1079 #define MSR_ARCH_CAP_FB_CLEAR (1U << 17) 1080 #define MSR_ARCH_CAP_PBRSB_NO (1U << 24) 1081 1082 #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) 1083 1084 /* VMX MSR features */ 1085 #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull 1086 #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32) 1087 #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32) 1088 #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49) 1089 #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54) 1090 #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55) 1091 #define MSR_VMX_BASIC_ANY_ERRCODE (1ULL << 56) 1092 #define MSR_VMX_BASIC_NESTED_EXCEPTION (1ULL << 58) 1093 1094 #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full 1095 #define MSR_VMX_MISC_STORE_LMA (1ULL << 5) 1096 #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6) 1097 #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7) 1098 #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8) 1099 #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull 1100 #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29) 1101 #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30) 1102 1103 #define MSR_VMX_EPT_EXECONLY (1ULL << 0) 1104 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6) 1105 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7) 1106 #define MSR_VMX_EPT_UC (1ULL << 8) 1107 #define MSR_VMX_EPT_WB (1ULL << 14) 1108 #define MSR_VMX_EPT_2MB (1ULL << 16) 1109 #define MSR_VMX_EPT_1GB (1ULL << 17) 1110 #define MSR_VMX_EPT_INVEPT (1ULL << 20) 1111 #define MSR_VMX_EPT_AD_BITS (1ULL << 21) 1112 #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22) 1113 #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25) 1114 #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26) 1115 #define MSR_VMX_EPT_INVVPID (1ULL << 32) 1116 #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40) 1117 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41) 1118 #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42) 1119 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43) 1120 1121 #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0) 1122 1123 1124 /* VMX controls */ 1125 #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 1126 #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008 1127 #define VMX_CPU_BASED_HLT_EXITING 0x00000080 1128 #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200 1129 #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400 1130 #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800 1131 #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000 1132 #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000 1133 #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000 1134 #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000 1135 #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000 1136 #define VMX_CPU_BASED_TPR_SHADOW 0x00200000 1137 #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 1138 #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000 1139 #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000 1140 #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000 1141 #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 1142 #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000 1143 #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000 1144 #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000 1145 #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 1146 1147 #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 1148 #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002 1149 #define VMX_SECONDARY_EXEC_DESC 0x00000004 1150 #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008 1151 #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 1152 #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020 1153 #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040 1154 #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 1155 #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 1156 #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 1157 #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 1158 #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800 1159 #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 1160 #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 1161 #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000 1162 #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000 1163 #define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000 1164 #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000 1165 #define VMX_SECONDARY_EXEC_XSAVES 0x00100000 1166 #define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000 1167 #define VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE 0x04000000 1168 1169 #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001 1170 #define VMX_PIN_BASED_NMI_EXITING 0x00000008 1171 #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020 1172 #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 1173 #define VMX_PIN_BASED_POSTED_INTR 0x00000080 1174 1175 #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 1176 #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 1177 #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 1178 #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 1179 #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000 1180 #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000 1181 #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000 1182 #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000 1183 #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 1184 #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000 1185 #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 1186 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 1187 #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000 1188 1189 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 1190 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 1191 #define VMX_VM_ENTRY_SMM 0x00000400 1192 #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 1193 #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 1194 #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000 1195 #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000 1196 #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000 1197 #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000 1198 #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 1199 #define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000 1200 1201 /* Supported Hyper-V Enlightenments */ 1202 #define HYPERV_FEAT_RELAXED 0 1203 #define HYPERV_FEAT_VAPIC 1 1204 #define HYPERV_FEAT_TIME 2 1205 #define HYPERV_FEAT_CRASH 3 1206 #define HYPERV_FEAT_RESET 4 1207 #define HYPERV_FEAT_VPINDEX 5 1208 #define HYPERV_FEAT_RUNTIME 6 1209 #define HYPERV_FEAT_SYNIC 7 1210 #define HYPERV_FEAT_STIMER 8 1211 #define HYPERV_FEAT_FREQUENCIES 9 1212 #define HYPERV_FEAT_REENLIGHTENMENT 10 1213 #define HYPERV_FEAT_TLBFLUSH 11 1214 #define HYPERV_FEAT_EVMCS 12 1215 #define HYPERV_FEAT_IPI 13 1216 #define HYPERV_FEAT_STIMER_DIRECT 14 1217 #define HYPERV_FEAT_AVIC 15 1218 #define HYPERV_FEAT_SYNDBG 16 1219 #define HYPERV_FEAT_MSR_BITMAP 17 1220 #define HYPERV_FEAT_XMM_INPUT 18 1221 #define HYPERV_FEAT_TLBFLUSH_EXT 19 1222 #define HYPERV_FEAT_TLBFLUSH_DIRECT 20 1223 1224 #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY 1225 #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF 1226 #endif 1227 1228 #define EXCP00_DIVZ 0 1229 #define EXCP01_DB 1 1230 #define EXCP02_NMI 2 1231 #define EXCP03_INT3 3 1232 #define EXCP04_INTO 4 1233 #define EXCP05_BOUND 5 1234 #define EXCP06_ILLOP 6 1235 #define EXCP07_PREX 7 1236 #define EXCP08_DBLE 8 1237 #define EXCP09_XERR 9 1238 #define EXCP0A_TSS 10 1239 #define EXCP0B_NOSEG 11 1240 #define EXCP0C_STACK 12 1241 #define EXCP0D_GPF 13 1242 #define EXCP0E_PAGE 14 1243 #define EXCP10_COPR 16 1244 #define EXCP11_ALGN 17 1245 #define EXCP12_MCHK 18 1246 1247 #define EXCP_VMEXIT 0x100 /* only for system emulation */ 1248 #define EXCP_SYSCALL 0x101 /* only for user emulation */ 1249 #define EXCP_VSYSCALL 0x102 /* only for user emulation */ 1250 1251 /* i386-specific interrupt pending bits. */ 1252 #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 1253 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 1254 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 1255 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 1256 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 1257 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 1258 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 1259 1260 /* Use a clearer name for this. */ 1261 #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET 1262 1263 #define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX) 1264 1265 /* Instead of computing the condition codes after each x86 instruction, 1266 * QEMU just stores one operand (called CC_SRC), the result 1267 * (called CC_DST) and the type of operation (called CC_OP). When the 1268 * condition codes are needed, the condition codes can be calculated 1269 * using this information. Condition codes are not generated if they 1270 * are only needed for conditional branches. 1271 */ 1272 typedef enum { 1273 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ 1274 CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ 1275 CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ 1276 CC_OP_ADOX, /* CC_SRC2 = O, CC_SRC = rest. */ 1277 CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ 1278 1279 CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ 1280 CC_OP_MULW, 1281 CC_OP_MULL, 1282 CC_OP_MULQ, 1283 1284 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1285 CC_OP_ADDW, 1286 CC_OP_ADDL, 1287 CC_OP_ADDQ, 1288 1289 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1290 CC_OP_ADCW, 1291 CC_OP_ADCL, 1292 CC_OP_ADCQ, 1293 1294 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1295 CC_OP_SUBW, 1296 CC_OP_SUBL, 1297 CC_OP_SUBQ, 1298 1299 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1300 CC_OP_SBBW, 1301 CC_OP_SBBL, 1302 CC_OP_SBBQ, 1303 1304 CC_OP_LOGICB, /* modify all flags, CC_DST = res */ 1305 CC_OP_LOGICW, 1306 CC_OP_LOGICL, 1307 CC_OP_LOGICQ, 1308 1309 CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1310 CC_OP_INCW, 1311 CC_OP_INCL, 1312 CC_OP_INCQ, 1313 1314 CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1315 CC_OP_DECW, 1316 CC_OP_DECL, 1317 CC_OP_DECQ, 1318 1319 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ 1320 CC_OP_SHLW, 1321 CC_OP_SHLL, 1322 CC_OP_SHLQ, 1323 1324 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ 1325 CC_OP_SARW, 1326 CC_OP_SARL, 1327 CC_OP_SARQ, 1328 1329 CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ 1330 CC_OP_BMILGW, 1331 CC_OP_BMILGL, 1332 CC_OP_BMILGQ, 1333 1334 CC_OP_CLR, /* Z set, all other flags clear. */ 1335 CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */ 1336 1337 CC_OP_NB, 1338 } CCOp; 1339 QEMU_BUILD_BUG_ON(CC_OP_NB >= 128); 1340 1341 typedef struct SegmentCache { 1342 uint32_t selector; 1343 target_ulong base; 1344 uint32_t limit; 1345 uint32_t flags; 1346 } SegmentCache; 1347 1348 typedef union MMXReg { 1349 uint8_t _b_MMXReg[64 / 8]; 1350 uint16_t _w_MMXReg[64 / 16]; 1351 uint32_t _l_MMXReg[64 / 32]; 1352 uint64_t _q_MMXReg[64 / 64]; 1353 float32 _s_MMXReg[64 / 32]; 1354 float64 _d_MMXReg[64 / 64]; 1355 } MMXReg; 1356 1357 typedef union XMMReg { 1358 uint64_t _q_XMMReg[128 / 64]; 1359 } XMMReg; 1360 1361 typedef union YMMReg { 1362 uint64_t _q_YMMReg[256 / 64]; 1363 XMMReg _x_YMMReg[256 / 128]; 1364 } YMMReg; 1365 1366 typedef union ZMMReg { 1367 uint8_t _b_ZMMReg[512 / 8]; 1368 uint16_t _w_ZMMReg[512 / 16]; 1369 uint32_t _l_ZMMReg[512 / 32]; 1370 uint64_t _q_ZMMReg[512 / 64]; 1371 float16 _h_ZMMReg[512 / 16]; 1372 float32 _s_ZMMReg[512 / 32]; 1373 float64 _d_ZMMReg[512 / 64]; 1374 XMMReg _x_ZMMReg[512 / 128]; 1375 YMMReg _y_ZMMReg[512 / 256]; 1376 } ZMMReg; 1377 1378 typedef struct BNDReg { 1379 uint64_t lb; 1380 uint64_t ub; 1381 } BNDReg; 1382 1383 typedef struct BNDCSReg { 1384 uint64_t cfgu; 1385 uint64_t sts; 1386 } BNDCSReg; 1387 1388 #define BNDCFG_ENABLE 1ULL 1389 #define BNDCFG_BNDPRESERVE 2ULL 1390 #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK 1391 1392 #if HOST_BIG_ENDIAN 1393 #define ZMM_B(n) _b_ZMMReg[63 - (n)] 1394 #define ZMM_W(n) _w_ZMMReg[31 - (n)] 1395 #define ZMM_L(n) _l_ZMMReg[15 - (n)] 1396 #define ZMM_H(n) _h_ZMMReg[31 - (n)] 1397 #define ZMM_S(n) _s_ZMMReg[15 - (n)] 1398 #define ZMM_Q(n) _q_ZMMReg[7 - (n)] 1399 #define ZMM_D(n) _d_ZMMReg[7 - (n)] 1400 #define ZMM_X(n) _x_ZMMReg[3 - (n)] 1401 #define ZMM_Y(n) _y_ZMMReg[1 - (n)] 1402 1403 #define XMM_Q(n) _q_XMMReg[1 - (n)] 1404 1405 #define YMM_Q(n) _q_YMMReg[3 - (n)] 1406 #define YMM_X(n) _x_YMMReg[1 - (n)] 1407 1408 #define MMX_B(n) _b_MMXReg[7 - (n)] 1409 #define MMX_W(n) _w_MMXReg[3 - (n)] 1410 #define MMX_L(n) _l_MMXReg[1 - (n)] 1411 #define MMX_S(n) _s_MMXReg[1 - (n)] 1412 #else 1413 #define ZMM_B(n) _b_ZMMReg[n] 1414 #define ZMM_W(n) _w_ZMMReg[n] 1415 #define ZMM_L(n) _l_ZMMReg[n] 1416 #define ZMM_H(n) _h_ZMMReg[n] 1417 #define ZMM_S(n) _s_ZMMReg[n] 1418 #define ZMM_Q(n) _q_ZMMReg[n] 1419 #define ZMM_D(n) _d_ZMMReg[n] 1420 #define ZMM_X(n) _x_ZMMReg[n] 1421 #define ZMM_Y(n) _y_ZMMReg[n] 1422 1423 #define XMM_Q(n) _q_XMMReg[n] 1424 1425 #define YMM_Q(n) _q_YMMReg[n] 1426 #define YMM_X(n) _x_YMMReg[n] 1427 1428 #define MMX_B(n) _b_MMXReg[n] 1429 #define MMX_W(n) _w_MMXReg[n] 1430 #define MMX_L(n) _l_MMXReg[n] 1431 #define MMX_S(n) _s_MMXReg[n] 1432 #endif 1433 #define MMX_Q(n) _q_MMXReg[n] 1434 1435 typedef union { 1436 floatx80 d __attribute__((aligned(16))); 1437 MMXReg mmx; 1438 } FPReg; 1439 1440 typedef struct { 1441 uint64_t base; 1442 uint64_t mask; 1443 } MTRRVar; 1444 1445 #define CPU_NB_REGS64 16 1446 #define CPU_NB_REGS32 8 1447 1448 #ifdef TARGET_X86_64 1449 #define CPU_NB_REGS CPU_NB_REGS64 1450 #else 1451 #define CPU_NB_REGS CPU_NB_REGS32 1452 #endif 1453 1454 #define MAX_FIXED_COUNTERS 3 1455 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) 1456 1457 #define TARGET_INSN_START_EXTRA_WORDS 1 1458 1459 #define NB_OPMASK_REGS 8 1460 1461 /* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish 1462 * that APIC ID hasn't been set yet 1463 */ 1464 #define UNASSIGNED_APIC_ID 0xFFFFFFFF 1465 1466 typedef struct X86LegacyXSaveArea { 1467 uint16_t fcw; 1468 uint16_t fsw; 1469 uint8_t ftw; 1470 uint8_t reserved; 1471 uint16_t fpop; 1472 union { 1473 struct { 1474 uint64_t fpip; 1475 uint64_t fpdp; 1476 }; 1477 struct { 1478 uint32_t fip; 1479 uint32_t fcs; 1480 uint32_t foo; 1481 uint32_t fos; 1482 }; 1483 }; 1484 uint32_t mxcsr; 1485 uint32_t mxcsr_mask; 1486 FPReg fpregs[8]; 1487 uint8_t xmm_regs[16][16]; 1488 uint32_t hw_reserved[12]; 1489 uint32_t sw_reserved[12]; 1490 } X86LegacyXSaveArea; 1491 1492 QEMU_BUILD_BUG_ON(sizeof(X86LegacyXSaveArea) != 512); 1493 1494 typedef struct X86XSaveHeader { 1495 uint64_t xstate_bv; 1496 uint64_t xcomp_bv; 1497 uint64_t reserve0; 1498 uint8_t reserved[40]; 1499 } X86XSaveHeader; 1500 1501 /* Ext. save area 2: AVX State */ 1502 typedef struct XSaveAVX { 1503 uint8_t ymmh[16][16]; 1504 } XSaveAVX; 1505 1506 /* Ext. save area 3: BNDREG */ 1507 typedef struct XSaveBNDREG { 1508 BNDReg bnd_regs[4]; 1509 } XSaveBNDREG; 1510 1511 /* Ext. save area 4: BNDCSR */ 1512 typedef union XSaveBNDCSR { 1513 BNDCSReg bndcsr; 1514 uint8_t data[64]; 1515 } XSaveBNDCSR; 1516 1517 /* Ext. save area 5: Opmask */ 1518 typedef struct XSaveOpmask { 1519 uint64_t opmask_regs[NB_OPMASK_REGS]; 1520 } XSaveOpmask; 1521 1522 /* Ext. save area 6: ZMM_Hi256 */ 1523 typedef struct XSaveZMM_Hi256 { 1524 uint8_t zmm_hi256[16][32]; 1525 } XSaveZMM_Hi256; 1526 1527 /* Ext. save area 7: Hi16_ZMM */ 1528 typedef struct XSaveHi16_ZMM { 1529 uint8_t hi16_zmm[16][64]; 1530 } XSaveHi16_ZMM; 1531 1532 /* Ext. save area 9: PKRU state */ 1533 typedef struct XSavePKRU { 1534 uint32_t pkru; 1535 uint32_t padding; 1536 } XSavePKRU; 1537 1538 /* Ext. save area 17: AMX XTILECFG state */ 1539 typedef struct XSaveXTILECFG { 1540 uint8_t xtilecfg[64]; 1541 } XSaveXTILECFG; 1542 1543 /* Ext. save area 18: AMX XTILEDATA state */ 1544 typedef struct XSaveXTILEDATA { 1545 uint8_t xtiledata[8][1024]; 1546 } XSaveXTILEDATA; 1547 1548 typedef struct { 1549 uint64_t from; 1550 uint64_t to; 1551 uint64_t info; 1552 } LBREntry; 1553 1554 #define ARCH_LBR_NR_ENTRIES 32 1555 1556 /* Ext. save area 19: Supervisor mode Arch LBR state */ 1557 typedef struct XSavesArchLBR { 1558 uint64_t lbr_ctl; 1559 uint64_t lbr_depth; 1560 uint64_t ler_from; 1561 uint64_t ler_to; 1562 uint64_t ler_info; 1563 LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; 1564 } XSavesArchLBR; 1565 1566 QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); 1567 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); 1568 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); 1569 QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); 1570 QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); 1571 QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); 1572 QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); 1573 QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40); 1574 QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000); 1575 QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328); 1576 1577 typedef struct ExtSaveArea { 1578 uint32_t feature, bits; 1579 uint32_t offset, size; 1580 uint32_t ecx; 1581 } ExtSaveArea; 1582 1583 #define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1) 1584 1585 extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT]; 1586 1587 typedef enum TPRAccess { 1588 TPR_ACCESS_READ, 1589 TPR_ACCESS_WRITE, 1590 } TPRAccess; 1591 1592 /* Cache information data structures: */ 1593 1594 enum CacheType { 1595 DATA_CACHE, 1596 INSTRUCTION_CACHE, 1597 UNIFIED_CACHE 1598 }; 1599 1600 typedef struct CPUCacheInfo { 1601 enum CacheType type; 1602 uint8_t level; 1603 /* Size in bytes */ 1604 uint32_t size; 1605 /* Line size, in bytes */ 1606 uint16_t line_size; 1607 /* 1608 * Associativity. 1609 * Note: representation of fully-associative caches is not implemented 1610 */ 1611 uint8_t associativity; 1612 /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ 1613 uint8_t partitions; 1614 /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ 1615 uint32_t sets; 1616 /* 1617 * Lines per tag. 1618 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. 1619 * (Is this synonym to @partitions?) 1620 */ 1621 uint8_t lines_per_tag; 1622 1623 /* Self-initializing cache */ 1624 bool self_init; 1625 /* 1626 * WBINVD/INVD is not guaranteed to act upon lower level caches of 1627 * non-originating threads sharing this cache. 1628 * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] 1629 */ 1630 bool no_invd_sharing; 1631 /* 1632 * Cache is inclusive of lower cache levels. 1633 * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. 1634 */ 1635 bool inclusive; 1636 /* 1637 * A complex function is used to index the cache, potentially using all 1638 * address bits. CPUID[4].EDX[bit 2]. 1639 */ 1640 bool complex_indexing; 1641 1642 /* 1643 * Cache Topology. The level that cache is shared in. 1644 * Used to encode CPUID[4].EAX[bits 25:14] or 1645 * CPUID[0x8000001D].EAX[bits 25:14]. 1646 */ 1647 enum CPUTopoLevel share_level; 1648 } CPUCacheInfo; 1649 1650 1651 typedef struct CPUCaches { 1652 CPUCacheInfo *l1d_cache; 1653 CPUCacheInfo *l1i_cache; 1654 CPUCacheInfo *l2_cache; 1655 CPUCacheInfo *l3_cache; 1656 } CPUCaches; 1657 1658 typedef struct HVFX86LazyFlags { 1659 target_ulong result; 1660 target_ulong auxbits; 1661 } HVFX86LazyFlags; 1662 1663 typedef struct CPUArchState { 1664 /* standard registers */ 1665 target_ulong regs[CPU_NB_REGS]; 1666 target_ulong eip; 1667 target_ulong eflags; /* eflags register. During CPU emulation, CC 1668 flags and DF are set to zero because they are 1669 stored elsewhere */ 1670 1671 /* emulator internal eflags handling */ 1672 target_ulong cc_dst; 1673 target_ulong cc_src; 1674 target_ulong cc_src2; 1675 uint32_t cc_op; 1676 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ 1677 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags 1678 are known at translation time. */ 1679 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ 1680 1681 /* segments */ 1682 SegmentCache segs[6]; /* selector values */ 1683 SegmentCache ldt; 1684 SegmentCache tr; 1685 SegmentCache gdt; /* only base and limit are used */ 1686 SegmentCache idt; /* only base and limit are used */ 1687 1688 target_ulong cr[5]; /* NOTE: cr1 is unused */ 1689 1690 bool pdptrs_valid; 1691 uint64_t pdptrs[4]; 1692 int32_t a20_mask; 1693 1694 BNDReg bnd_regs[4]; 1695 BNDCSReg bndcs_regs; 1696 uint64_t msr_bndcfgs; 1697 uint64_t efer; 1698 1699 /* Beginning of state preserved by INIT (dummy marker). */ 1700 struct {} start_init_save; 1701 1702 /* FPU state */ 1703 unsigned int fpstt; /* top of stack index */ 1704 uint16_t fpus; 1705 uint16_t fpuc; 1706 uint8_t fptags[8]; /* 0 = valid, 1 = empty */ 1707 FPReg fpregs[8]; 1708 /* KVM-only so far */ 1709 uint16_t fpop; 1710 uint16_t fpcs; 1711 uint16_t fpds; 1712 uint64_t fpip; 1713 uint64_t fpdp; 1714 1715 /* emulator internal variables */ 1716 float_status fp_status; 1717 floatx80 ft0; 1718 1719 float_status mmx_status; /* for 3DNow! float ops */ 1720 float_status sse_status; 1721 uint32_t mxcsr; 1722 ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16); 1723 ZMMReg xmm_t0 QEMU_ALIGNED(16); 1724 MMXReg mmx_t0; 1725 1726 uint64_t opmask_regs[NB_OPMASK_REGS]; 1727 #ifdef TARGET_X86_64 1728 uint8_t xtilecfg[64]; 1729 uint8_t xtiledata[8192]; 1730 #endif 1731 1732 /* sysenter registers */ 1733 uint32_t sysenter_cs; 1734 target_ulong sysenter_esp; 1735 target_ulong sysenter_eip; 1736 uint64_t star; 1737 1738 uint64_t vm_hsave; 1739 1740 #ifdef TARGET_X86_64 1741 target_ulong lstar; 1742 target_ulong cstar; 1743 target_ulong fmask; 1744 target_ulong kernelgsbase; 1745 1746 /* FRED MSRs */ 1747 uint64_t fred_rsp0; 1748 uint64_t fred_rsp1; 1749 uint64_t fred_rsp2; 1750 uint64_t fred_rsp3; 1751 uint64_t fred_stklvls; 1752 uint64_t fred_ssp1; 1753 uint64_t fred_ssp2; 1754 uint64_t fred_ssp3; 1755 uint64_t fred_config; 1756 #endif 1757 1758 uint64_t tsc_adjust; 1759 uint64_t tsc_deadline; 1760 uint64_t tsc_aux; 1761 1762 uint64_t xcr0; 1763 1764 uint64_t mcg_status; 1765 uint64_t msr_ia32_misc_enable; 1766 uint64_t msr_ia32_feature_control; 1767 uint64_t msr_ia32_sgxlepubkeyhash[4]; 1768 1769 uint64_t msr_fixed_ctr_ctrl; 1770 uint64_t msr_global_ctrl; 1771 uint64_t msr_global_status; 1772 uint64_t msr_global_ovf_ctrl; 1773 uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; 1774 uint64_t msr_gp_counters[MAX_GP_COUNTERS]; 1775 uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; 1776 1777 uint64_t pat; 1778 uint32_t smbase; 1779 uint64_t msr_smi_count; 1780 1781 uint32_t pkru; 1782 uint32_t pkrs; 1783 uint32_t tsx_ctrl; 1784 1785 uint64_t spec_ctrl; 1786 uint64_t amd_tsc_scale_msr; 1787 uint64_t virt_ssbd; 1788 1789 /* End of state preserved by INIT (dummy marker). */ 1790 struct {} end_init_save; 1791 1792 uint64_t system_time_msr; 1793 uint64_t wall_clock_msr; 1794 uint64_t steal_time_msr; 1795 uint64_t async_pf_en_msr; 1796 uint64_t async_pf_int_msr; 1797 uint64_t pv_eoi_en_msr; 1798 uint64_t poll_control_msr; 1799 1800 /* Partition-wide HV MSRs, will be updated only on the first vcpu */ 1801 uint64_t msr_hv_hypercall; 1802 uint64_t msr_hv_guest_os_id; 1803 uint64_t msr_hv_tsc; 1804 uint64_t msr_hv_syndbg_control; 1805 uint64_t msr_hv_syndbg_status; 1806 uint64_t msr_hv_syndbg_send_page; 1807 uint64_t msr_hv_syndbg_recv_page; 1808 uint64_t msr_hv_syndbg_pending_page; 1809 uint64_t msr_hv_syndbg_options; 1810 1811 /* Per-VCPU HV MSRs */ 1812 uint64_t msr_hv_vapic; 1813 uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; 1814 uint64_t msr_hv_runtime; 1815 uint64_t msr_hv_synic_control; 1816 uint64_t msr_hv_synic_evt_page; 1817 uint64_t msr_hv_synic_msg_page; 1818 uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; 1819 uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; 1820 uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; 1821 uint64_t msr_hv_reenlightenment_control; 1822 uint64_t msr_hv_tsc_emulation_control; 1823 uint64_t msr_hv_tsc_emulation_status; 1824 1825 uint64_t msr_rtit_ctrl; 1826 uint64_t msr_rtit_status; 1827 uint64_t msr_rtit_output_base; 1828 uint64_t msr_rtit_output_mask; 1829 uint64_t msr_rtit_cr3_match; 1830 uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; 1831 1832 /* Per-VCPU XFD MSRs */ 1833 uint64_t msr_xfd; 1834 uint64_t msr_xfd_err; 1835 1836 /* Per-VCPU Arch LBR MSRs */ 1837 uint64_t msr_lbr_ctl; 1838 uint64_t msr_lbr_depth; 1839 LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; 1840 1841 /* exception/interrupt handling */ 1842 int error_code; 1843 int exception_is_int; 1844 target_ulong exception_next_eip; 1845 target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ 1846 union { 1847 struct CPUBreakpoint *cpu_breakpoint[4]; 1848 struct CPUWatchpoint *cpu_watchpoint[4]; 1849 }; /* break/watchpoints for dr[0..3] */ 1850 int old_exception; /* exception in flight */ 1851 1852 uint64_t vm_vmcb; 1853 uint64_t tsc_offset; 1854 uint64_t intercept; 1855 uint16_t intercept_cr_read; 1856 uint16_t intercept_cr_write; 1857 uint16_t intercept_dr_read; 1858 uint16_t intercept_dr_write; 1859 uint32_t intercept_exceptions; 1860 uint64_t nested_cr3; 1861 uint32_t nested_pg_mode; 1862 uint8_t v_tpr; 1863 uint32_t int_ctl; 1864 1865 /* KVM states, automatically cleared on reset */ 1866 uint8_t nmi_injected; 1867 uint8_t nmi_pending; 1868 1869 uintptr_t retaddr; 1870 1871 /* Fields up to this point are cleared by a CPU reset */ 1872 struct {} end_reset_fields; 1873 1874 /* Fields after this point are preserved across CPU reset. */ 1875 1876 /* processor features (e.g. for CPUID insn) */ 1877 /* Minimum cpuid leaf 7 value */ 1878 uint32_t cpuid_level_func7; 1879 /* Actual cpuid leaf 7 value */ 1880 uint32_t cpuid_min_level_func7; 1881 /* Minimum level/xlevel/xlevel2, based on CPU model + features */ 1882 uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; 1883 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ 1884 uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; 1885 /* Actual level/xlevel/xlevel2 value: */ 1886 uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; 1887 uint32_t cpuid_vendor1; 1888 uint32_t cpuid_vendor2; 1889 uint32_t cpuid_vendor3; 1890 uint32_t cpuid_version; 1891 FeatureWordArray features; 1892 /* Features that were explicitly enabled/disabled */ 1893 FeatureWordArray user_features; 1894 uint32_t cpuid_model[12]; 1895 /* Cache information for CPUID. When legacy-cache=on, the cache data 1896 * on each CPUID leaf will be different, because we keep compatibility 1897 * with old QEMU versions. 1898 */ 1899 CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; 1900 1901 /* MTRRs */ 1902 uint64_t mtrr_fixed[11]; 1903 uint64_t mtrr_deftype; 1904 MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; 1905 1906 /* For KVM */ 1907 uint32_t mp_state; 1908 int32_t exception_nr; 1909 int32_t interrupt_injected; 1910 uint8_t soft_interrupt; 1911 uint8_t exception_pending; 1912 uint8_t exception_injected; 1913 uint8_t has_error_code; 1914 uint8_t exception_has_payload; 1915 uint64_t exception_payload; 1916 uint8_t triple_fault_pending; 1917 uint32_t ins_len; 1918 uint32_t sipi_vector; 1919 bool tsc_valid; 1920 int64_t tsc_khz; 1921 int64_t user_tsc_khz; /* for sanity check only */ 1922 uint64_t apic_bus_freq; 1923 uint64_t tsc; 1924 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 1925 void *xsave_buf; 1926 uint32_t xsave_buf_len; 1927 #endif 1928 #if defined(CONFIG_KVM) 1929 struct kvm_nested_state *nested_state; 1930 MemoryRegion *xen_vcpu_info_mr; 1931 void *xen_vcpu_info_hva; 1932 uint64_t xen_vcpu_info_gpa; 1933 uint64_t xen_vcpu_info_default_gpa; 1934 uint64_t xen_vcpu_time_info_gpa; 1935 uint64_t xen_vcpu_runstate_gpa; 1936 uint8_t xen_vcpu_callback_vector; 1937 bool xen_callback_asserted; 1938 uint16_t xen_virq[XEN_NR_VIRQS]; 1939 uint64_t xen_singleshot_timer_ns; 1940 QEMUTimer *xen_singleshot_timer; 1941 uint64_t xen_periodic_timer_period; 1942 QEMUTimer *xen_periodic_timer; 1943 QemuMutex xen_timers_lock; 1944 #endif 1945 #if defined(CONFIG_HVF) 1946 HVFX86LazyFlags hvf_lflags; 1947 void *hvf_mmio_buf; 1948 #endif 1949 1950 uint64_t mcg_cap; 1951 uint64_t mcg_ctl; 1952 uint64_t mcg_ext_ctl; 1953 uint64_t mce_banks[MCE_BANKS_DEF*4]; 1954 uint64_t xstate_bv; 1955 1956 /* vmstate */ 1957 uint16_t fpus_vmstate; 1958 uint16_t fptag_vmstate; 1959 uint16_t fpregs_format_vmstate; 1960 1961 uint64_t xss; 1962 uint32_t umwait; 1963 1964 TPRAccess tpr_access_type; 1965 1966 /* Number of dies within this CPU package. */ 1967 unsigned nr_dies; 1968 1969 /* Number of modules within one die. */ 1970 unsigned nr_modules; 1971 1972 /* Bitmap of available CPU topology levels for this CPU. */ 1973 DECLARE_BITMAP(avail_cpu_topo, CPU_TOPO_LEVEL_MAX); 1974 } CPUX86State; 1975 1976 struct kvm_msrs; 1977 1978 /** 1979 * X86CPU: 1980 * @env: #CPUX86State 1981 * @migratable: If set, only migratable flags will be accepted when "enforce" 1982 * mode is used, and only migratable flags will be included in the "host" 1983 * CPU model. 1984 * 1985 * An x86 CPU. 1986 */ 1987 struct ArchCPU { 1988 CPUState parent_obj; 1989 1990 CPUX86State env; 1991 VMChangeStateEntry *vmsentry; 1992 1993 uint64_t ucode_rev; 1994 1995 uint32_t hyperv_spinlock_attempts; 1996 char *hyperv_vendor; 1997 bool hyperv_synic_kvm_only; 1998 uint64_t hyperv_features; 1999 bool hyperv_passthrough; 2000 OnOffAuto hyperv_no_nonarch_cs; 2001 uint32_t hyperv_vendor_id[3]; 2002 uint32_t hyperv_interface_id[4]; 2003 uint32_t hyperv_limits[3]; 2004 bool hyperv_enforce_cpuid; 2005 uint32_t hyperv_ver_id_build; 2006 uint16_t hyperv_ver_id_major; 2007 uint16_t hyperv_ver_id_minor; 2008 uint32_t hyperv_ver_id_sp; 2009 uint8_t hyperv_ver_id_sb; 2010 uint32_t hyperv_ver_id_sn; 2011 2012 bool check_cpuid; 2013 bool enforce_cpuid; 2014 /* 2015 * Force features to be enabled even if the host doesn't support them. 2016 * This is dangerous and should be done only for testing CPUID 2017 * compatibility. 2018 */ 2019 bool force_features; 2020 bool expose_kvm; 2021 bool expose_tcg; 2022 bool migratable; 2023 bool migrate_smi_count; 2024 bool max_features; /* Enable all supported features automatically */ 2025 uint32_t apic_id; 2026 2027 /* Enables publishing of TSC increment and Local APIC bus frequencies to 2028 * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */ 2029 bool vmware_cpuid_freq; 2030 2031 /* if true the CPUID code directly forward host cache leaves to the guest */ 2032 bool cache_info_passthrough; 2033 2034 /* if true the CPUID code directly forwards 2035 * host monitor/mwait leaves to the guest */ 2036 struct { 2037 uint32_t eax; 2038 uint32_t ebx; 2039 uint32_t ecx; 2040 uint32_t edx; 2041 } mwait; 2042 2043 /* Features that were filtered out because of missing host capabilities */ 2044 FeatureWordArray filtered_features; 2045 2046 /* Enable PMU CPUID bits. This can't be enabled by default yet because 2047 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID 2048 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel 2049 * capabilities) directly to the guest. 2050 */ 2051 bool enable_pmu; 2052 2053 /* 2054 * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR. 2055 * This can't be initialized with a default because it doesn't have 2056 * stable ABI support yet. It is only allowed to pass all LBR_FMT bits 2057 * returned by kvm_arch_get_supported_msr_feature()(which depends on both 2058 * host CPU and kernel capabilities) to the guest. 2059 */ 2060 uint64_t lbr_fmt; 2061 2062 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is 2063 * disabled by default to avoid breaking migration between QEMU with 2064 * different LMCE configurations. 2065 */ 2066 bool enable_lmce; 2067 2068 /* Compatibility bits for old machine types. 2069 * If true present virtual l3 cache for VM, the vcpus in the same virtual 2070 * socket share an virtual l3 cache. 2071 */ 2072 bool enable_l3_cache; 2073 2074 /* Compatibility bits for old machine types. 2075 * If true present L1 cache as per-thread, not per-core. 2076 */ 2077 bool l1_cache_per_core; 2078 2079 /* Compatibility bits for old machine types. 2080 * If true present the old cache topology information 2081 */ 2082 bool legacy_cache; 2083 2084 /* Compatibility bits for old machine types. 2085 * If true decode the CPUID Function 0x8000001E_ECX to support multiple 2086 * nodes per processor 2087 */ 2088 bool legacy_multi_node; 2089 2090 /* Compatibility bits for old machine types: */ 2091 bool enable_cpuid_0xb; 2092 2093 /* Enable auto level-increase for all CPUID leaves */ 2094 bool full_cpuid_auto_level; 2095 2096 /* Only advertise CPUID leaves defined by the vendor */ 2097 bool vendor_cpuid_only; 2098 2099 /* Enable auto level-increase for Intel Processor Trace leave */ 2100 bool intel_pt_auto_level; 2101 2102 /* if true fill the top bits of the MTRR_PHYSMASKn variable range */ 2103 bool fill_mtrr_mask; 2104 2105 /* if true override the phys_bits value with a value read from the host */ 2106 bool host_phys_bits; 2107 2108 /* if set, limit maximum value for phys_bits when host_phys_bits is true */ 2109 uint8_t host_phys_bits_limit; 2110 2111 /* Forcefully disable KVM PV features not exposed in guest CPUIDs */ 2112 bool kvm_pv_enforce_cpuid; 2113 2114 /* Number of physical address bits supported */ 2115 uint32_t phys_bits; 2116 2117 /* 2118 * Number of guest physical address bits available. Usually this is 2119 * identical to host physical address bits. With NPT or EPT 4-level 2120 * paging, guest physical address space might be restricted to 48 bits 2121 * even if the host cpu supports more physical address bits. 2122 */ 2123 uint32_t guest_phys_bits; 2124 2125 /* in order to simplify APIC support, we leave this pointer to the 2126 user */ 2127 struct DeviceState *apic_state; 2128 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; 2129 Notifier machine_done; 2130 2131 struct kvm_msrs *kvm_msr_buf; 2132 2133 int32_t node_id; /* NUMA node this CPU belongs to */ 2134 int32_t socket_id; 2135 int32_t die_id; 2136 int32_t module_id; 2137 int32_t core_id; 2138 int32_t thread_id; 2139 2140 int32_t hv_max_vps; 2141 2142 bool xen_vapic; 2143 }; 2144 2145 typedef struct X86CPUModel X86CPUModel; 2146 2147 /** 2148 * X86CPUClass: 2149 * @cpu_def: CPU model definition 2150 * @host_cpuid_required: Whether CPU model requires cpuid from host. 2151 * @ordering: Ordering on the "-cpu help" CPU model list. 2152 * @migration_safe: See CpuDefinitionInfo::migration_safe 2153 * @static_model: See CpuDefinitionInfo::static 2154 * @parent_realize: The parent class' realize handler. 2155 * @parent_phases: The parent class' reset phase handlers. 2156 * 2157 * An x86 CPU model or family. 2158 */ 2159 struct X86CPUClass { 2160 CPUClass parent_class; 2161 2162 /* 2163 * CPU definition, automatically loaded by instance_init if not NULL. 2164 * Should be eventually replaced by subclass-specific property defaults. 2165 */ 2166 X86CPUModel *model; 2167 2168 bool host_cpuid_required; 2169 int ordering; 2170 bool migration_safe; 2171 bool static_model; 2172 2173 /* 2174 * Optional description of CPU model. 2175 * If unavailable, cpu_def->model_id is used. 2176 */ 2177 const char *model_description; 2178 2179 DeviceRealize parent_realize; 2180 DeviceUnrealize parent_unrealize; 2181 ResettablePhases parent_phases; 2182 }; 2183 2184 #ifndef CONFIG_USER_ONLY 2185 extern const VMStateDescription vmstate_x86_cpu; 2186 #endif 2187 2188 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); 2189 2190 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, 2191 int cpuid, DumpState *s); 2192 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, 2193 int cpuid, DumpState *s); 2194 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 2195 DumpState *s); 2196 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 2197 DumpState *s); 2198 2199 bool x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, 2200 Error **errp); 2201 2202 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); 2203 2204 int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); 2205 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 2206 2207 void x86_cpu_list(void); 2208 int cpu_x86_support_mca_broadcast(CPUX86State *env); 2209 2210 #ifndef CONFIG_USER_ONLY 2211 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, 2212 MemTxAttrs *attrs); 2213 int cpu_get_pic_interrupt(CPUX86State *s); 2214 2215 /* MS-DOS compatibility mode FPU exception support */ 2216 void x86_register_ferr_irq(qemu_irq irq); 2217 void fpu_check_raise_ferr_irq(CPUX86State *s); 2218 void cpu_set_ignne(void); 2219 void cpu_clear_ignne(void); 2220 #endif 2221 2222 /* mpx_helper.c */ 2223 void cpu_sync_bndcs_hflags(CPUX86State *env); 2224 2225 /* this function must always be used to load data in the segment 2226 cache: it synchronizes the hflags with the segment cache values */ 2227 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 2228 X86Seg seg_reg, unsigned int selector, 2229 target_ulong base, 2230 unsigned int limit, 2231 unsigned int flags) 2232 { 2233 SegmentCache *sc; 2234 unsigned int new_hflags; 2235 2236 sc = &env->segs[seg_reg]; 2237 sc->selector = selector; 2238 sc->base = base; 2239 sc->limit = limit; 2240 sc->flags = flags; 2241 2242 /* update the hidden flags */ 2243 { 2244 if (seg_reg == R_CS) { 2245 #ifdef TARGET_X86_64 2246 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { 2247 /* long mode */ 2248 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 2249 env->hflags &= ~(HF_ADDSEG_MASK); 2250 } else 2251 #endif 2252 { 2253 /* legacy / compatibility case */ 2254 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) 2255 >> (DESC_B_SHIFT - HF_CS32_SHIFT); 2256 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | 2257 new_hflags; 2258 } 2259 } 2260 if (seg_reg == R_SS) { 2261 int cpl = (flags >> DESC_DPL_SHIFT) & 3; 2262 #if HF_CPL_MASK != 3 2263 #error HF_CPL_MASK is hardcoded 2264 #endif 2265 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; 2266 /* Possibly switch between BNDCFGS and BNDCFGU */ 2267 cpu_sync_bndcs_hflags(env); 2268 } 2269 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) 2270 >> (DESC_B_SHIFT - HF_SS32_SHIFT); 2271 if (env->hflags & HF_CS64_MASK) { 2272 /* zero base assumed for DS, ES and SS in long mode */ 2273 } else if (!(env->cr[0] & CR0_PE_MASK) || 2274 (env->eflags & VM_MASK) || 2275 !(env->hflags & HF_CS32_MASK)) { 2276 /* XXX: try to avoid this test. The problem comes from the 2277 fact that is real mode or vm86 mode we only modify the 2278 'base' and 'selector' fields of the segment cache to go 2279 faster. A solution may be to force addseg to one in 2280 translate-i386.c. */ 2281 new_hflags |= HF_ADDSEG_MASK; 2282 } else { 2283 new_hflags |= ((env->segs[R_DS].base | 2284 env->segs[R_ES].base | 2285 env->segs[R_SS].base) != 0) << 2286 HF_ADDSEG_SHIFT; 2287 } 2288 env->hflags = (env->hflags & 2289 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; 2290 } 2291 } 2292 2293 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, 2294 uint8_t sipi_vector) 2295 { 2296 CPUState *cs = CPU(cpu); 2297 CPUX86State *env = &cpu->env; 2298 2299 env->eip = 0; 2300 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, 2301 sipi_vector << 12, 2302 env->segs[R_CS].limit, 2303 env->segs[R_CS].flags); 2304 cs->halted = 0; 2305 } 2306 2307 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 2308 target_ulong *base, unsigned int *limit, 2309 unsigned int *flags); 2310 2311 /* op_helper.c */ 2312 /* used for debug or cpu save/restore */ 2313 2314 /* cpu-exec.c */ 2315 /* 2316 * The following helpers are only usable in user mode simulation. 2317 * The host pointers should come from lock_user(). 2318 */ 2319 void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector); 2320 void cpu_x86_fsave(CPUX86State *s, void *host, size_t len); 2321 void cpu_x86_frstor(CPUX86State *s, void *host, size_t len); 2322 void cpu_x86_fxsave(CPUX86State *s, void *host, size_t len); 2323 void cpu_x86_fxrstor(CPUX86State *s, void *host, size_t len); 2324 void cpu_x86_xsave(CPUX86State *s, void *host, size_t len, uint64_t rbfm); 2325 bool cpu_x86_xrstor(CPUX86State *s, void *host, size_t len, uint64_t rbfm); 2326 2327 /* cpu.c */ 2328 void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 2329 uint32_t vendor2, uint32_t vendor3); 2330 typedef struct PropValue { 2331 const char *prop, *value; 2332 } PropValue; 2333 void x86_cpu_apply_props(X86CPU *cpu, PropValue *props); 2334 2335 void x86_cpu_after_reset(X86CPU *cpu); 2336 2337 uint32_t cpu_x86_virtual_addr_width(CPUX86State *env); 2338 2339 /* cpu.c other functions (cpuid) */ 2340 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 2341 uint32_t *eax, uint32_t *ebx, 2342 uint32_t *ecx, uint32_t *edx); 2343 void cpu_clear_apic_feature(CPUX86State *env); 2344 void cpu_set_apic_feature(CPUX86State *env); 2345 void host_cpuid(uint32_t function, uint32_t count, 2346 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); 2347 bool cpu_has_x2apic_feature(CPUX86State *env); 2348 2349 /* helper.c */ 2350 void x86_cpu_set_a20(X86CPU *cpu, int a20_state); 2351 void cpu_sync_avx_hflag(CPUX86State *env); 2352 2353 #ifndef CONFIG_USER_ONLY 2354 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) 2355 { 2356 return !!attrs.secure; 2357 } 2358 2359 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) 2360 { 2361 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); 2362 } 2363 2364 /* 2365 * load efer and update the corresponding hflags. XXX: do consistency 2366 * checks with cpuid bits? 2367 */ 2368 void cpu_load_efer(CPUX86State *env, uint64_t val); 2369 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); 2370 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); 2371 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); 2372 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); 2373 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); 2374 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); 2375 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); 2376 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); 2377 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); 2378 #endif 2379 2380 /* will be suppressed */ 2381 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 2382 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 2383 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 2384 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); 2385 2386 /* hw/pc.c */ 2387 uint64_t cpu_get_tsc(CPUX86State *env); 2388 2389 #define CPU_RESOLVING_TYPE TYPE_X86_CPU 2390 2391 #ifdef TARGET_X86_64 2392 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") 2393 #else 2394 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") 2395 #endif 2396 2397 #define cpu_list x86_cpu_list 2398 2399 /* MMU modes definitions */ 2400 #define MMU_KSMAP64_IDX 0 2401 #define MMU_KSMAP32_IDX 1 2402 #define MMU_USER64_IDX 2 2403 #define MMU_USER32_IDX 3 2404 #define MMU_KNOSMAP64_IDX 4 2405 #define MMU_KNOSMAP32_IDX 5 2406 #define MMU_PHYS_IDX 6 2407 #define MMU_NESTED_IDX 7 2408 2409 #ifdef CONFIG_USER_ONLY 2410 #ifdef TARGET_X86_64 2411 #define MMU_USER_IDX MMU_USER64_IDX 2412 #else 2413 #define MMU_USER_IDX MMU_USER32_IDX 2414 #endif 2415 #endif 2416 2417 static inline bool is_mmu_index_smap(int mmu_index) 2418 { 2419 return (mmu_index & ~1) == MMU_KSMAP64_IDX; 2420 } 2421 2422 static inline bool is_mmu_index_user(int mmu_index) 2423 { 2424 return (mmu_index & ~1) == MMU_USER64_IDX; 2425 } 2426 2427 static inline bool is_mmu_index_32(int mmu_index) 2428 { 2429 assert(mmu_index < MMU_PHYS_IDX); 2430 return mmu_index & 1; 2431 } 2432 2433 static inline int cpu_mmu_index_kernel(CPUX86State *env) 2434 { 2435 int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; 2436 int mmu_index_base = 2437 !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : 2438 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX; 2439 2440 return mmu_index_base + mmu_index_32; 2441 } 2442 2443 #define CC_DST (env->cc_dst) 2444 #define CC_SRC (env->cc_src) 2445 #define CC_SRC2 (env->cc_src2) 2446 #define CC_OP (env->cc_op) 2447 2448 #include "exec/cpu-all.h" 2449 #include "svm.h" 2450 2451 #if !defined(CONFIG_USER_ONLY) 2452 #include "hw/i386/apic.h" 2453 #endif 2454 2455 static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc, 2456 uint64_t *cs_base, uint32_t *flags) 2457 { 2458 *flags = env->hflags | 2459 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); 2460 if (env->hflags & HF_CS64_MASK) { 2461 *cs_base = 0; 2462 *pc = env->eip; 2463 } else { 2464 *cs_base = env->segs[R_CS].base; 2465 *pc = (uint32_t)(*cs_base + env->eip); 2466 } 2467 } 2468 2469 void do_cpu_init(X86CPU *cpu); 2470 2471 #define MCE_INJECT_BROADCAST 1 2472 #define MCE_INJECT_UNCOND_AO 2 2473 2474 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, 2475 uint64_t status, uint64_t mcg_status, uint64_t addr, 2476 uint64_t misc, int flags); 2477 2478 uint32_t cpu_cc_compute_all(CPUX86State *env1); 2479 2480 static inline uint32_t cpu_compute_eflags(CPUX86State *env) 2481 { 2482 uint32_t eflags = env->eflags; 2483 if (tcg_enabled()) { 2484 eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK); 2485 } 2486 return eflags; 2487 } 2488 2489 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) 2490 { 2491 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); 2492 } 2493 2494 static inline int32_t x86_get_a20_mask(CPUX86State *env) 2495 { 2496 if (env->hflags & HF_SMM_MASK) { 2497 return -1; 2498 } else { 2499 return env->a20_mask; 2500 } 2501 } 2502 2503 static inline bool cpu_has_vmx(CPUX86State *env) 2504 { 2505 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; 2506 } 2507 2508 static inline bool cpu_has_svm(CPUX86State *env) 2509 { 2510 return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM; 2511 } 2512 2513 /* 2514 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set. 2515 * Since it was set, CR4.VMXE must remain set as long as vCPU is in 2516 * VMX operation. This is because CR4.VMXE is one of the bits set 2517 * in MSR_IA32_VMX_CR4_FIXED1. 2518 * 2519 * There is one exception to above statement when vCPU enters SMM mode. 2520 * When a vCPU enters SMM mode, it temporarily exit VMX operation and 2521 * may also reset CR4.VMXE during execution in SMM mode. 2522 * When vCPU exits SMM mode, vCPU state is restored to be in VMX operation 2523 * and CR4.VMXE is restored to it's original value of being set. 2524 * 2525 * Therefore, when vCPU is not in SMM mode, we can infer whether 2526 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot 2527 * know for certain. 2528 */ 2529 static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) 2530 { 2531 return cpu_has_vmx(env) && 2532 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); 2533 } 2534 2535 /* excp_helper.c */ 2536 int get_pg_mode(CPUX86State *env); 2537 2538 /* fpu_helper.c */ 2539 void update_fp_status(CPUX86State *env); 2540 void update_mxcsr_status(CPUX86State *env); 2541 void update_mxcsr_from_sse_status(CPUX86State *env); 2542 2543 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) 2544 { 2545 env->mxcsr = mxcsr; 2546 if (tcg_enabled()) { 2547 update_mxcsr_status(env); 2548 } 2549 } 2550 2551 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) 2552 { 2553 env->fpuc = fpuc; 2554 if (tcg_enabled()) { 2555 update_fp_status(env); 2556 } 2557 } 2558 2559 /* svm_helper.c */ 2560 #ifdef CONFIG_USER_ONLY 2561 static inline void 2562 cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2563 uint64_t param, uintptr_t retaddr) 2564 { /* no-op */ } 2565 static inline bool 2566 cpu_svm_has_intercept(CPUX86State *env, uint32_t type) 2567 { return false; } 2568 #else 2569 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2570 uint64_t param, uintptr_t retaddr); 2571 bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type); 2572 #endif 2573 2574 /* apic.c */ 2575 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); 2576 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, 2577 TPRAccess access); 2578 2579 /* Special values for X86CPUVersion: */ 2580 2581 /* Resolve to latest CPU version */ 2582 #define CPU_VERSION_LATEST -1 2583 2584 /* 2585 * Resolve to version defined by current machine type. 2586 * See x86_cpu_set_default_version() 2587 */ 2588 #define CPU_VERSION_AUTO -2 2589 2590 /* Don't resolve to any versioned CPU models, like old QEMU versions */ 2591 #define CPU_VERSION_LEGACY 0 2592 2593 typedef int X86CPUVersion; 2594 2595 /* 2596 * Set default CPU model version for CPU models having 2597 * version == CPU_VERSION_AUTO. 2598 */ 2599 void x86_cpu_set_default_version(X86CPUVersion version); 2600 2601 #ifndef CONFIG_USER_ONLY 2602 2603 void do_cpu_sipi(X86CPU *cpu); 2604 2605 #define APIC_DEFAULT_ADDRESS 0xfee00000 2606 #define APIC_SPACE_SIZE 0x100000 2607 2608 /* cpu-dump.c */ 2609 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags); 2610 2611 #endif 2612 2613 /* cpu.c */ 2614 bool cpu_is_bsp(X86CPU *cpu); 2615 2616 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen); 2617 void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen); 2618 uint32_t xsave_area_size(uint64_t mask, bool compacted); 2619 void x86_update_hflags(CPUX86State* env); 2620 2621 static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat) 2622 { 2623 return !!(cpu->hyperv_features & BIT(feat)); 2624 } 2625 2626 static inline uint64_t cr4_reserved_bits(CPUX86State *env) 2627 { 2628 uint64_t reserved_bits = CR4_RESERVED_MASK; 2629 if (!env->features[FEAT_XSAVE]) { 2630 reserved_bits |= CR4_OSXSAVE_MASK; 2631 } 2632 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) { 2633 reserved_bits |= CR4_SMEP_MASK; 2634 } 2635 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { 2636 reserved_bits |= CR4_SMAP_MASK; 2637 } 2638 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) { 2639 reserved_bits |= CR4_FSGSBASE_MASK; 2640 } 2641 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { 2642 reserved_bits |= CR4_PKE_MASK; 2643 } 2644 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) { 2645 reserved_bits |= CR4_LA57_MASK; 2646 } 2647 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) { 2648 reserved_bits |= CR4_UMIP_MASK; 2649 } 2650 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) { 2651 reserved_bits |= CR4_PKS_MASK; 2652 } 2653 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) { 2654 reserved_bits |= CR4_LAM_SUP_MASK; 2655 } 2656 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED)) { 2657 reserved_bits |= CR4_FRED_MASK; 2658 } 2659 return reserved_bits; 2660 } 2661 2662 static inline bool ctl_has_irq(CPUX86State *env) 2663 { 2664 uint32_t int_prio; 2665 uint32_t tpr; 2666 2667 int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT; 2668 tpr = env->int_ctl & V_TPR_MASK; 2669 2670 if (env->int_ctl & V_IGN_TPR_MASK) { 2671 return (env->int_ctl & V_IRQ_MASK); 2672 } 2673 2674 return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr); 2675 } 2676 2677 #if defined(TARGET_X86_64) && \ 2678 defined(CONFIG_USER_ONLY) && \ 2679 defined(CONFIG_LINUX) 2680 # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20) 2681 #endif 2682 2683 #endif /* I386_CPU_H */ 2684