1 /* 2 * i386 virtual CPU header 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #ifndef I386_CPU_H 21 #define I386_CPU_H 22 23 #include "sysemu/tcg.h" 24 #include "cpu-qom.h" 25 #include "kvm/hyperv-proto.h" 26 #include "exec/cpu-defs.h" 27 #include "qapi/qapi-types-common.h" 28 29 /* The x86 has a strong memory model with some store-after-load re-ordering */ 30 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) 31 32 #define KVM_HAVE_MCE_INJECTION 1 33 34 /* support for self modifying code even if the modified instruction is 35 close to the modifying instruction */ 36 #define TARGET_HAS_PRECISE_SMC 37 38 #ifdef TARGET_X86_64 39 #define I386_ELF_MACHINE EM_X86_64 40 #define ELF_MACHINE_UNAME "x86_64" 41 #else 42 #define I386_ELF_MACHINE EM_386 43 #define ELF_MACHINE_UNAME "i686" 44 #endif 45 46 enum { 47 R_EAX = 0, 48 R_ECX = 1, 49 R_EDX = 2, 50 R_EBX = 3, 51 R_ESP = 4, 52 R_EBP = 5, 53 R_ESI = 6, 54 R_EDI = 7, 55 R_R8 = 8, 56 R_R9 = 9, 57 R_R10 = 10, 58 R_R11 = 11, 59 R_R12 = 12, 60 R_R13 = 13, 61 R_R14 = 14, 62 R_R15 = 15, 63 64 R_AL = 0, 65 R_CL = 1, 66 R_DL = 2, 67 R_BL = 3, 68 R_AH = 4, 69 R_CH = 5, 70 R_DH = 6, 71 R_BH = 7, 72 }; 73 74 typedef enum X86Seg { 75 R_ES = 0, 76 R_CS = 1, 77 R_SS = 2, 78 R_DS = 3, 79 R_FS = 4, 80 R_GS = 5, 81 R_LDTR = 6, 82 R_TR = 7, 83 } X86Seg; 84 85 /* segment descriptor fields */ 86 #define DESC_G_SHIFT 23 87 #define DESC_G_MASK (1 << DESC_G_SHIFT) 88 #define DESC_B_SHIFT 22 89 #define DESC_B_MASK (1 << DESC_B_SHIFT) 90 #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ 91 #define DESC_L_MASK (1 << DESC_L_SHIFT) 92 #define DESC_AVL_SHIFT 20 93 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) 94 #define DESC_P_SHIFT 15 95 #define DESC_P_MASK (1 << DESC_P_SHIFT) 96 #define DESC_DPL_SHIFT 13 97 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) 98 #define DESC_S_SHIFT 12 99 #define DESC_S_MASK (1 << DESC_S_SHIFT) 100 #define DESC_TYPE_SHIFT 8 101 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) 102 #define DESC_A_MASK (1 << 8) 103 104 #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ 105 #define DESC_C_MASK (1 << 10) /* code: conforming */ 106 #define DESC_R_MASK (1 << 9) /* code: readable */ 107 108 #define DESC_E_MASK (1 << 10) /* data: expansion direction */ 109 #define DESC_W_MASK (1 << 9) /* data: writable */ 110 111 #define DESC_TSS_BUSY_MASK (1 << 9) 112 113 /* eflags masks */ 114 #define CC_C 0x0001 115 #define CC_P 0x0004 116 #define CC_A 0x0010 117 #define CC_Z 0x0040 118 #define CC_S 0x0080 119 #define CC_O 0x0800 120 121 #define TF_SHIFT 8 122 #define IOPL_SHIFT 12 123 #define VM_SHIFT 17 124 125 #define TF_MASK 0x00000100 126 #define IF_MASK 0x00000200 127 #define DF_MASK 0x00000400 128 #define IOPL_MASK 0x00003000 129 #define NT_MASK 0x00004000 130 #define RF_MASK 0x00010000 131 #define VM_MASK 0x00020000 132 #define AC_MASK 0x00040000 133 #define VIF_MASK 0x00080000 134 #define VIP_MASK 0x00100000 135 #define ID_MASK 0x00200000 136 137 /* hidden flags - used internally by qemu to represent additional cpu 138 states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We 139 avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit 140 positions to ease oring with eflags. */ 141 /* current cpl */ 142 #define HF_CPL_SHIFT 0 143 /* true if hardware interrupts must be disabled for next instruction */ 144 #define HF_INHIBIT_IRQ_SHIFT 3 145 /* 16 or 32 segments */ 146 #define HF_CS32_SHIFT 4 147 #define HF_SS32_SHIFT 5 148 /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ 149 #define HF_ADDSEG_SHIFT 6 150 /* copy of CR0.PE (protected mode) */ 151 #define HF_PE_SHIFT 7 152 #define HF_TF_SHIFT 8 /* must be same as eflags */ 153 #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ 154 #define HF_EM_SHIFT 10 155 #define HF_TS_SHIFT 11 156 #define HF_IOPL_SHIFT 12 /* must be same as eflags */ 157 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ 158 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ 159 #define HF_RF_SHIFT 16 /* must be same as eflags */ 160 #define HF_VM_SHIFT 17 /* must be same as eflags */ 161 #define HF_AC_SHIFT 18 /* must be same as eflags */ 162 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ 163 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ 164 #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ 165 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ 166 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ 167 #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ 168 #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ 169 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ 170 #define HF_UMIP_SHIFT 27 /* CR4.UMIP */ 171 172 #define HF_CPL_MASK (3 << HF_CPL_SHIFT) 173 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) 174 #define HF_CS32_MASK (1 << HF_CS32_SHIFT) 175 #define HF_SS32_MASK (1 << HF_SS32_SHIFT) 176 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) 177 #define HF_PE_MASK (1 << HF_PE_SHIFT) 178 #define HF_TF_MASK (1 << HF_TF_SHIFT) 179 #define HF_MP_MASK (1 << HF_MP_SHIFT) 180 #define HF_EM_MASK (1 << HF_EM_SHIFT) 181 #define HF_TS_MASK (1 << HF_TS_SHIFT) 182 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) 183 #define HF_LMA_MASK (1 << HF_LMA_SHIFT) 184 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 185 #define HF_RF_MASK (1 << HF_RF_SHIFT) 186 #define HF_VM_MASK (1 << HF_VM_SHIFT) 187 #define HF_AC_MASK (1 << HF_AC_SHIFT) 188 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 189 #define HF_SVME_MASK (1 << HF_SVME_SHIFT) 190 #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) 191 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 192 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) 193 #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) 194 #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) 195 #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) 196 #define HF_UMIP_MASK (1 << HF_UMIP_SHIFT) 197 198 /* hflags2 */ 199 200 #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ 201 #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ 202 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ 203 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ 204 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ 205 #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ 206 #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ 207 #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ 208 #define HF2_VGIF_SHIFT 8 /* Can take VIRQ*/ 209 210 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) 211 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) 212 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) 213 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) 214 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) 215 #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) 216 #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) 217 #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) 218 #define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT) 219 220 #define CR0_PE_SHIFT 0 221 #define CR0_MP_SHIFT 1 222 223 #define CR0_PE_MASK (1U << 0) 224 #define CR0_MP_MASK (1U << 1) 225 #define CR0_EM_MASK (1U << 2) 226 #define CR0_TS_MASK (1U << 3) 227 #define CR0_ET_MASK (1U << 4) 228 #define CR0_NE_MASK (1U << 5) 229 #define CR0_WP_MASK (1U << 16) 230 #define CR0_AM_MASK (1U << 18) 231 #define CR0_NW_MASK (1U << 29) 232 #define CR0_CD_MASK (1U << 30) 233 #define CR0_PG_MASK (1U << 31) 234 235 #define CR4_VME_MASK (1U << 0) 236 #define CR4_PVI_MASK (1U << 1) 237 #define CR4_TSD_MASK (1U << 2) 238 #define CR4_DE_MASK (1U << 3) 239 #define CR4_PSE_MASK (1U << 4) 240 #define CR4_PAE_MASK (1U << 5) 241 #define CR4_MCE_MASK (1U << 6) 242 #define CR4_PGE_MASK (1U << 7) 243 #define CR4_PCE_MASK (1U << 8) 244 #define CR4_OSFXSR_SHIFT 9 245 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) 246 #define CR4_OSXMMEXCPT_MASK (1U << 10) 247 #define CR4_UMIP_MASK (1U << 11) 248 #define CR4_LA57_MASK (1U << 12) 249 #define CR4_VMXE_MASK (1U << 13) 250 #define CR4_SMXE_MASK (1U << 14) 251 #define CR4_FSGSBASE_MASK (1U << 16) 252 #define CR4_PCIDE_MASK (1U << 17) 253 #define CR4_OSXSAVE_MASK (1U << 18) 254 #define CR4_SMEP_MASK (1U << 20) 255 #define CR4_SMAP_MASK (1U << 21) 256 #define CR4_PKE_MASK (1U << 22) 257 #define CR4_PKS_MASK (1U << 24) 258 259 #define CR4_RESERVED_MASK \ 260 (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ 261 | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ 262 | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \ 263 | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \ 264 | CR4_LA57_MASK \ 265 | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \ 266 | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK)) 267 268 #define DR6_BD (1 << 13) 269 #define DR6_BS (1 << 14) 270 #define DR6_BT (1 << 15) 271 #define DR6_FIXED_1 0xffff0ff0 272 273 #define DR7_GD (1 << 13) 274 #define DR7_TYPE_SHIFT 16 275 #define DR7_LEN_SHIFT 18 276 #define DR7_FIXED_1 0x00000400 277 #define DR7_GLOBAL_BP_MASK 0xaa 278 #define DR7_LOCAL_BP_MASK 0x55 279 #define DR7_MAX_BP 4 280 #define DR7_TYPE_BP_INST 0x0 281 #define DR7_TYPE_DATA_WR 0x1 282 #define DR7_TYPE_IO_RW 0x2 283 #define DR7_TYPE_DATA_RW 0x3 284 285 #define DR_RESERVED_MASK 0xffffffff00000000ULL 286 287 #define PG_PRESENT_BIT 0 288 #define PG_RW_BIT 1 289 #define PG_USER_BIT 2 290 #define PG_PWT_BIT 3 291 #define PG_PCD_BIT 4 292 #define PG_ACCESSED_BIT 5 293 #define PG_DIRTY_BIT 6 294 #define PG_PSE_BIT 7 295 #define PG_GLOBAL_BIT 8 296 #define PG_PSE_PAT_BIT 12 297 #define PG_PKRU_BIT 59 298 #define PG_NX_BIT 63 299 300 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) 301 #define PG_RW_MASK (1 << PG_RW_BIT) 302 #define PG_USER_MASK (1 << PG_USER_BIT) 303 #define PG_PWT_MASK (1 << PG_PWT_BIT) 304 #define PG_PCD_MASK (1 << PG_PCD_BIT) 305 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) 306 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) 307 #define PG_PSE_MASK (1 << PG_PSE_BIT) 308 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) 309 #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) 310 #define PG_ADDRESS_MASK 0x000ffffffffff000LL 311 #define PG_HI_USER_MASK 0x7ff0000000000000LL 312 #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) 313 #define PG_NX_MASK (1ULL << PG_NX_BIT) 314 315 #define PG_ERROR_W_BIT 1 316 317 #define PG_ERROR_P_MASK 0x01 318 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) 319 #define PG_ERROR_U_MASK 0x04 320 #define PG_ERROR_RSVD_MASK 0x08 321 #define PG_ERROR_I_D_MASK 0x10 322 #define PG_ERROR_PK_MASK 0x20 323 324 #define PG_MODE_PAE (1 << 0) 325 #define PG_MODE_LMA (1 << 1) 326 #define PG_MODE_NXE (1 << 2) 327 #define PG_MODE_PSE (1 << 3) 328 #define PG_MODE_LA57 (1 << 4) 329 #define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15) 330 331 /* Bits of CR4 that do not affect the NPT page format. */ 332 #define PG_MODE_WP (1 << 16) 333 #define PG_MODE_PKE (1 << 17) 334 #define PG_MODE_PKS (1 << 18) 335 #define PG_MODE_SMEP (1 << 19) 336 337 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ 338 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ 339 #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ 340 341 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) 342 #define MCE_BANKS_DEF 10 343 344 #define MCG_CAP_BANKS_MASK 0xff 345 346 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ 347 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ 348 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 349 #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ 350 351 #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ 352 353 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 354 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ 355 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ 356 #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ 357 #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ 358 #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ 359 #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ 360 #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 361 #define MCI_STATUS_AR (1ULL<<55) /* Action required */ 362 363 /* MISC register defines */ 364 #define MCM_ADDR_SEGOFF 0 /* segment offset */ 365 #define MCM_ADDR_LINEAR 1 /* linear address */ 366 #define MCM_ADDR_PHYS 2 /* physical address */ 367 #define MCM_ADDR_MEM 3 /* memory address */ 368 #define MCM_ADDR_GENERIC 7 /* generic */ 369 370 #define MSR_IA32_TSC 0x10 371 #define MSR_IA32_APICBASE 0x1b 372 #define MSR_IA32_APICBASE_BSP (1<<8) 373 #define MSR_IA32_APICBASE_ENABLE (1<<11) 374 #define MSR_IA32_APICBASE_EXTD (1 << 10) 375 #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) 376 #define MSR_IA32_FEATURE_CONTROL 0x0000003a 377 #define MSR_TSC_ADJUST 0x0000003b 378 #define MSR_IA32_SPEC_CTRL 0x48 379 #define MSR_VIRT_SSBD 0xc001011f 380 #define MSR_IA32_PRED_CMD 0x49 381 #define MSR_IA32_UCODE_REV 0x8b 382 #define MSR_IA32_CORE_CAPABILITY 0xcf 383 384 #define MSR_IA32_ARCH_CAPABILITIES 0x10a 385 #define ARCH_CAP_TSX_CTRL_MSR (1<<7) 386 387 #define MSR_IA32_PERF_CAPABILITIES 0x345 388 389 #define MSR_IA32_TSX_CTRL 0x122 390 #define MSR_IA32_TSCDEADLINE 0x6e0 391 #define MSR_IA32_PKRS 0x6e1 392 393 #define FEATURE_CONTROL_LOCKED (1<<0) 394 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1) 395 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) 396 #define FEATURE_CONTROL_SGX_LC (1ULL << 17) 397 #define FEATURE_CONTROL_SGX (1ULL << 18) 398 #define FEATURE_CONTROL_LMCE (1<<20) 399 400 #define MSR_IA32_SGXLEPUBKEYHASH0 0x8c 401 #define MSR_IA32_SGXLEPUBKEYHASH1 0x8d 402 #define MSR_IA32_SGXLEPUBKEYHASH2 0x8e 403 #define MSR_IA32_SGXLEPUBKEYHASH3 0x8f 404 405 #define MSR_P6_PERFCTR0 0xc1 406 407 #define MSR_IA32_SMBASE 0x9e 408 #define MSR_SMI_COUNT 0x34 409 #define MSR_CORE_THREAD_COUNT 0x35 410 #define MSR_MTRRcap 0xfe 411 #define MSR_MTRRcap_VCNT 8 412 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) 413 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) 414 415 #define MSR_IA32_SYSENTER_CS 0x174 416 #define MSR_IA32_SYSENTER_ESP 0x175 417 #define MSR_IA32_SYSENTER_EIP 0x176 418 419 #define MSR_MCG_CAP 0x179 420 #define MSR_MCG_STATUS 0x17a 421 #define MSR_MCG_CTL 0x17b 422 #define MSR_MCG_EXT_CTL 0x4d0 423 424 #define MSR_P6_EVNTSEL0 0x186 425 426 #define MSR_IA32_PERF_STATUS 0x198 427 428 #define MSR_IA32_MISC_ENABLE 0x1a0 429 /* Indicates good rep/movs microcode on some processors: */ 430 #define MSR_IA32_MISC_ENABLE_DEFAULT 1 431 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) 432 433 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) 434 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) 435 436 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) 437 438 #define MSR_MTRRfix64K_00000 0x250 439 #define MSR_MTRRfix16K_80000 0x258 440 #define MSR_MTRRfix16K_A0000 0x259 441 #define MSR_MTRRfix4K_C0000 0x268 442 #define MSR_MTRRfix4K_C8000 0x269 443 #define MSR_MTRRfix4K_D0000 0x26a 444 #define MSR_MTRRfix4K_D8000 0x26b 445 #define MSR_MTRRfix4K_E0000 0x26c 446 #define MSR_MTRRfix4K_E8000 0x26d 447 #define MSR_MTRRfix4K_F0000 0x26e 448 #define MSR_MTRRfix4K_F8000 0x26f 449 450 #define MSR_PAT 0x277 451 452 #define MSR_MTRRdefType 0x2ff 453 454 #define MSR_CORE_PERF_FIXED_CTR0 0x309 455 #define MSR_CORE_PERF_FIXED_CTR1 0x30a 456 #define MSR_CORE_PERF_FIXED_CTR2 0x30b 457 #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d 458 #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e 459 #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f 460 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 461 462 #define MSR_MC0_CTL 0x400 463 #define MSR_MC0_STATUS 0x401 464 #define MSR_MC0_ADDR 0x402 465 #define MSR_MC0_MISC 0x403 466 467 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560 468 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561 469 #define MSR_IA32_RTIT_CTL 0x570 470 #define MSR_IA32_RTIT_STATUS 0x571 471 #define MSR_IA32_RTIT_CR3_MATCH 0x572 472 #define MSR_IA32_RTIT_ADDR0_A 0x580 473 #define MSR_IA32_RTIT_ADDR0_B 0x581 474 #define MSR_IA32_RTIT_ADDR1_A 0x582 475 #define MSR_IA32_RTIT_ADDR1_B 0x583 476 #define MSR_IA32_RTIT_ADDR2_A 0x584 477 #define MSR_IA32_RTIT_ADDR2_B 0x585 478 #define MSR_IA32_RTIT_ADDR3_A 0x586 479 #define MSR_IA32_RTIT_ADDR3_B 0x587 480 #define MAX_RTIT_ADDRS 8 481 482 #define MSR_EFER 0xc0000080 483 484 #define MSR_EFER_SCE (1 << 0) 485 #define MSR_EFER_LME (1 << 8) 486 #define MSR_EFER_LMA (1 << 10) 487 #define MSR_EFER_NXE (1 << 11) 488 #define MSR_EFER_SVME (1 << 12) 489 #define MSR_EFER_FFXSR (1 << 14) 490 491 #define MSR_EFER_RESERVED\ 492 (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\ 493 | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\ 494 | MSR_EFER_FFXSR)) 495 496 #define MSR_STAR 0xc0000081 497 #define MSR_LSTAR 0xc0000082 498 #define MSR_CSTAR 0xc0000083 499 #define MSR_FMASK 0xc0000084 500 #define MSR_FSBASE 0xc0000100 501 #define MSR_GSBASE 0xc0000101 502 #define MSR_KERNELGSBASE 0xc0000102 503 #define MSR_TSC_AUX 0xc0000103 504 #define MSR_AMD64_TSC_RATIO 0xc0000104 505 506 #define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL 507 508 #define MSR_VM_HSAVE_PA 0xc0010117 509 510 #define MSR_IA32_BNDCFGS 0x00000d90 511 #define MSR_IA32_XSS 0x00000da0 512 #define MSR_IA32_UMWAIT_CONTROL 0xe1 513 514 #define MSR_IA32_VMX_BASIC 0x00000480 515 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 516 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 517 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 518 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 519 #define MSR_IA32_VMX_MISC 0x00000485 520 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 521 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 522 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 523 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 524 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a 525 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b 526 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c 527 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d 528 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e 529 #define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f 530 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 531 #define MSR_IA32_VMX_VMFUNC 0x00000491 532 533 #define XSTATE_FP_BIT 0 534 #define XSTATE_SSE_BIT 1 535 #define XSTATE_YMM_BIT 2 536 #define XSTATE_BNDREGS_BIT 3 537 #define XSTATE_BNDCSR_BIT 4 538 #define XSTATE_OPMASK_BIT 5 539 #define XSTATE_ZMM_Hi256_BIT 6 540 #define XSTATE_Hi16_ZMM_BIT 7 541 #define XSTATE_PKRU_BIT 9 542 543 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) 544 #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) 545 #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) 546 #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) 547 #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) 548 #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) 549 #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) 550 #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) 551 #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) 552 553 /* CPUID feature words */ 554 typedef enum FeatureWord { 555 FEAT_1_EDX, /* CPUID[1].EDX */ 556 FEAT_1_ECX, /* CPUID[1].ECX */ 557 FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ 558 FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ 559 FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ 560 FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ 561 FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ 562 FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ 563 FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ 564 FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ 565 FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ 566 FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ 567 FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ 568 FEAT_SVM, /* CPUID[8000_000A].EDX */ 569 FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ 570 FEAT_6_EAX, /* CPUID[6].EAX */ 571 FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ 572 FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ 573 FEAT_ARCH_CAPABILITIES, 574 FEAT_CORE_CAPABILITY, 575 FEAT_PERF_CAPABILITIES, 576 FEAT_VMX_PROCBASED_CTLS, 577 FEAT_VMX_SECONDARY_CTLS, 578 FEAT_VMX_PINBASED_CTLS, 579 FEAT_VMX_EXIT_CTLS, 580 FEAT_VMX_ENTRY_CTLS, 581 FEAT_VMX_MISC, 582 FEAT_VMX_EPT_VPID_CAPS, 583 FEAT_VMX_BASIC, 584 FEAT_VMX_VMFUNC, 585 FEAT_14_0_ECX, 586 FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */ 587 FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */ 588 FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ 589 FEATURE_WORDS, 590 } FeatureWord; 591 592 typedef uint64_t FeatureWordArray[FEATURE_WORDS]; 593 594 /* cpuid_features bits */ 595 #define CPUID_FP87 (1U << 0) 596 #define CPUID_VME (1U << 1) 597 #define CPUID_DE (1U << 2) 598 #define CPUID_PSE (1U << 3) 599 #define CPUID_TSC (1U << 4) 600 #define CPUID_MSR (1U << 5) 601 #define CPUID_PAE (1U << 6) 602 #define CPUID_MCE (1U << 7) 603 #define CPUID_CX8 (1U << 8) 604 #define CPUID_APIC (1U << 9) 605 #define CPUID_SEP (1U << 11) /* sysenter/sysexit */ 606 #define CPUID_MTRR (1U << 12) 607 #define CPUID_PGE (1U << 13) 608 #define CPUID_MCA (1U << 14) 609 #define CPUID_CMOV (1U << 15) 610 #define CPUID_PAT (1U << 16) 611 #define CPUID_PSE36 (1U << 17) 612 #define CPUID_PN (1U << 18) 613 #define CPUID_CLFLUSH (1U << 19) 614 #define CPUID_DTS (1U << 21) 615 #define CPUID_ACPI (1U << 22) 616 #define CPUID_MMX (1U << 23) 617 #define CPUID_FXSR (1U << 24) 618 #define CPUID_SSE (1U << 25) 619 #define CPUID_SSE2 (1U << 26) 620 #define CPUID_SS (1U << 27) 621 #define CPUID_HT (1U << 28) 622 #define CPUID_TM (1U << 29) 623 #define CPUID_IA64 (1U << 30) 624 #define CPUID_PBE (1U << 31) 625 626 #define CPUID_EXT_SSE3 (1U << 0) 627 #define CPUID_EXT_PCLMULQDQ (1U << 1) 628 #define CPUID_EXT_DTES64 (1U << 2) 629 #define CPUID_EXT_MONITOR (1U << 3) 630 #define CPUID_EXT_DSCPL (1U << 4) 631 #define CPUID_EXT_VMX (1U << 5) 632 #define CPUID_EXT_SMX (1U << 6) 633 #define CPUID_EXT_EST (1U << 7) 634 #define CPUID_EXT_TM2 (1U << 8) 635 #define CPUID_EXT_SSSE3 (1U << 9) 636 #define CPUID_EXT_CID (1U << 10) 637 #define CPUID_EXT_FMA (1U << 12) 638 #define CPUID_EXT_CX16 (1U << 13) 639 #define CPUID_EXT_XTPR (1U << 14) 640 #define CPUID_EXT_PDCM (1U << 15) 641 #define CPUID_EXT_PCID (1U << 17) 642 #define CPUID_EXT_DCA (1U << 18) 643 #define CPUID_EXT_SSE41 (1U << 19) 644 #define CPUID_EXT_SSE42 (1U << 20) 645 #define CPUID_EXT_X2APIC (1U << 21) 646 #define CPUID_EXT_MOVBE (1U << 22) 647 #define CPUID_EXT_POPCNT (1U << 23) 648 #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) 649 #define CPUID_EXT_AES (1U << 25) 650 #define CPUID_EXT_XSAVE (1U << 26) 651 #define CPUID_EXT_OSXSAVE (1U << 27) 652 #define CPUID_EXT_AVX (1U << 28) 653 #define CPUID_EXT_F16C (1U << 29) 654 #define CPUID_EXT_RDRAND (1U << 30) 655 #define CPUID_EXT_HYPERVISOR (1U << 31) 656 657 #define CPUID_EXT2_FPU (1U << 0) 658 #define CPUID_EXT2_VME (1U << 1) 659 #define CPUID_EXT2_DE (1U << 2) 660 #define CPUID_EXT2_PSE (1U << 3) 661 #define CPUID_EXT2_TSC (1U << 4) 662 #define CPUID_EXT2_MSR (1U << 5) 663 #define CPUID_EXT2_PAE (1U << 6) 664 #define CPUID_EXT2_MCE (1U << 7) 665 #define CPUID_EXT2_CX8 (1U << 8) 666 #define CPUID_EXT2_APIC (1U << 9) 667 #define CPUID_EXT2_SYSCALL (1U << 11) 668 #define CPUID_EXT2_MTRR (1U << 12) 669 #define CPUID_EXT2_PGE (1U << 13) 670 #define CPUID_EXT2_MCA (1U << 14) 671 #define CPUID_EXT2_CMOV (1U << 15) 672 #define CPUID_EXT2_PAT (1U << 16) 673 #define CPUID_EXT2_PSE36 (1U << 17) 674 #define CPUID_EXT2_MP (1U << 19) 675 #define CPUID_EXT2_NX (1U << 20) 676 #define CPUID_EXT2_MMXEXT (1U << 22) 677 #define CPUID_EXT2_MMX (1U << 23) 678 #define CPUID_EXT2_FXSR (1U << 24) 679 #define CPUID_EXT2_FFXSR (1U << 25) 680 #define CPUID_EXT2_PDPE1GB (1U << 26) 681 #define CPUID_EXT2_RDTSCP (1U << 27) 682 #define CPUID_EXT2_LM (1U << 29) 683 #define CPUID_EXT2_3DNOWEXT (1U << 30) 684 #define CPUID_EXT2_3DNOW (1U << 31) 685 686 /* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */ 687 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ 688 CPUID_EXT2_DE | CPUID_EXT2_PSE | \ 689 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ 690 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ 691 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ 692 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ 693 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ 694 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ 695 CPUID_EXT2_MMX | CPUID_EXT2_FXSR) 696 697 #define CPUID_EXT3_LAHF_LM (1U << 0) 698 #define CPUID_EXT3_CMP_LEG (1U << 1) 699 #define CPUID_EXT3_SVM (1U << 2) 700 #define CPUID_EXT3_EXTAPIC (1U << 3) 701 #define CPUID_EXT3_CR8LEG (1U << 4) 702 #define CPUID_EXT3_ABM (1U << 5) 703 #define CPUID_EXT3_SSE4A (1U << 6) 704 #define CPUID_EXT3_MISALIGNSSE (1U << 7) 705 #define CPUID_EXT3_3DNOWPREFETCH (1U << 8) 706 #define CPUID_EXT3_OSVW (1U << 9) 707 #define CPUID_EXT3_IBS (1U << 10) 708 #define CPUID_EXT3_XOP (1U << 11) 709 #define CPUID_EXT3_SKINIT (1U << 12) 710 #define CPUID_EXT3_WDT (1U << 13) 711 #define CPUID_EXT3_LWP (1U << 15) 712 #define CPUID_EXT3_FMA4 (1U << 16) 713 #define CPUID_EXT3_TCE (1U << 17) 714 #define CPUID_EXT3_NODEID (1U << 19) 715 #define CPUID_EXT3_TBM (1U << 21) 716 #define CPUID_EXT3_TOPOEXT (1U << 22) 717 #define CPUID_EXT3_PERFCORE (1U << 23) 718 #define CPUID_EXT3_PERFNB (1U << 24) 719 720 #define CPUID_SVM_NPT (1U << 0) 721 #define CPUID_SVM_LBRV (1U << 1) 722 #define CPUID_SVM_SVMLOCK (1U << 2) 723 #define CPUID_SVM_NRIPSAVE (1U << 3) 724 #define CPUID_SVM_TSCSCALE (1U << 4) 725 #define CPUID_SVM_VMCBCLEAN (1U << 5) 726 #define CPUID_SVM_FLUSHASID (1U << 6) 727 #define CPUID_SVM_DECODEASSIST (1U << 7) 728 #define CPUID_SVM_PAUSEFILTER (1U << 10) 729 #define CPUID_SVM_PFTHRESHOLD (1U << 12) 730 #define CPUID_SVM_AVIC (1U << 13) 731 #define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15) 732 #define CPUID_SVM_VGIF (1U << 16) 733 #define CPUID_SVM_SVME_ADDR_CHK (1U << 28) 734 735 /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ 736 #define CPUID_7_0_EBX_FSGSBASE (1U << 0) 737 /* Support SGX */ 738 #define CPUID_7_0_EBX_SGX (1U << 2) 739 /* 1st Group of Advanced Bit Manipulation Extensions */ 740 #define CPUID_7_0_EBX_BMI1 (1U << 3) 741 /* Hardware Lock Elision */ 742 #define CPUID_7_0_EBX_HLE (1U << 4) 743 /* Intel Advanced Vector Extensions 2 */ 744 #define CPUID_7_0_EBX_AVX2 (1U << 5) 745 /* Supervisor-mode Execution Prevention */ 746 #define CPUID_7_0_EBX_SMEP (1U << 7) 747 /* 2nd Group of Advanced Bit Manipulation Extensions */ 748 #define CPUID_7_0_EBX_BMI2 (1U << 8) 749 /* Enhanced REP MOVSB/STOSB */ 750 #define CPUID_7_0_EBX_ERMS (1U << 9) 751 /* Invalidate Process-Context Identifier */ 752 #define CPUID_7_0_EBX_INVPCID (1U << 10) 753 /* Restricted Transactional Memory */ 754 #define CPUID_7_0_EBX_RTM (1U << 11) 755 /* Memory Protection Extension */ 756 #define CPUID_7_0_EBX_MPX (1U << 14) 757 /* AVX-512 Foundation */ 758 #define CPUID_7_0_EBX_AVX512F (1U << 16) 759 /* AVX-512 Doubleword & Quadword Instruction */ 760 #define CPUID_7_0_EBX_AVX512DQ (1U << 17) 761 /* Read Random SEED */ 762 #define CPUID_7_0_EBX_RDSEED (1U << 18) 763 /* ADCX and ADOX instructions */ 764 #define CPUID_7_0_EBX_ADX (1U << 19) 765 /* Supervisor Mode Access Prevention */ 766 #define CPUID_7_0_EBX_SMAP (1U << 20) 767 /* AVX-512 Integer Fused Multiply Add */ 768 #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) 769 /* Persistent Commit */ 770 #define CPUID_7_0_EBX_PCOMMIT (1U << 22) 771 /* Flush a Cache Line Optimized */ 772 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) 773 /* Cache Line Write Back */ 774 #define CPUID_7_0_EBX_CLWB (1U << 24) 775 /* Intel Processor Trace */ 776 #define CPUID_7_0_EBX_INTEL_PT (1U << 25) 777 /* AVX-512 Prefetch */ 778 #define CPUID_7_0_EBX_AVX512PF (1U << 26) 779 /* AVX-512 Exponential and Reciprocal */ 780 #define CPUID_7_0_EBX_AVX512ER (1U << 27) 781 /* AVX-512 Conflict Detection */ 782 #define CPUID_7_0_EBX_AVX512CD (1U << 28) 783 /* SHA1/SHA256 Instruction Extensions */ 784 #define CPUID_7_0_EBX_SHA_NI (1U << 29) 785 /* AVX-512 Byte and Word Instructions */ 786 #define CPUID_7_0_EBX_AVX512BW (1U << 30) 787 /* AVX-512 Vector Length Extensions */ 788 #define CPUID_7_0_EBX_AVX512VL (1U << 31) 789 790 /* AVX-512 Vector Byte Manipulation Instruction */ 791 #define CPUID_7_0_ECX_AVX512_VBMI (1U << 1) 792 /* User-Mode Instruction Prevention */ 793 #define CPUID_7_0_ECX_UMIP (1U << 2) 794 /* Protection Keys for User-mode Pages */ 795 #define CPUID_7_0_ECX_PKU (1U << 3) 796 /* OS Enable Protection Keys */ 797 #define CPUID_7_0_ECX_OSPKE (1U << 4) 798 /* UMONITOR/UMWAIT/TPAUSE Instructions */ 799 #define CPUID_7_0_ECX_WAITPKG (1U << 5) 800 /* Additional AVX-512 Vector Byte Manipulation Instruction */ 801 #define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6) 802 /* Galois Field New Instructions */ 803 #define CPUID_7_0_ECX_GFNI (1U << 8) 804 /* Vector AES Instructions */ 805 #define CPUID_7_0_ECX_VAES (1U << 9) 806 /* Carry-Less Multiplication Quadword */ 807 #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) 808 /* Vector Neural Network Instructions */ 809 #define CPUID_7_0_ECX_AVX512VNNI (1U << 11) 810 /* Support for VPOPCNT[B,W] and VPSHUFBITQMB */ 811 #define CPUID_7_0_ECX_AVX512BITALG (1U << 12) 812 /* POPCNT for vectors of DW/QW */ 813 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) 814 /* 5-level Page Tables */ 815 #define CPUID_7_0_ECX_LA57 (1U << 16) 816 /* Read Processor ID */ 817 #define CPUID_7_0_ECX_RDPID (1U << 22) 818 /* Bus Lock Debug Exception */ 819 #define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24) 820 /* Cache Line Demote Instruction */ 821 #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) 822 /* Move Doubleword as Direct Store Instruction */ 823 #define CPUID_7_0_ECX_MOVDIRI (1U << 27) 824 /* Move 64 Bytes as Direct Store Instruction */ 825 #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) 826 /* Support SGX Launch Control */ 827 #define CPUID_7_0_ECX_SGX_LC (1U << 30) 828 /* Protection Keys for Supervisor-mode Pages */ 829 #define CPUID_7_0_ECX_PKS (1U << 31) 830 831 /* AVX512 Neural Network Instructions */ 832 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) 833 /* AVX512 Multiply Accumulation Single Precision */ 834 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) 835 /* Fast Short Rep Mov */ 836 #define CPUID_7_0_EDX_FSRM (1U << 4) 837 /* AVX512 Vector Pair Intersection to a Pair of Mask Registers */ 838 #define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8) 839 /* SERIALIZE instruction */ 840 #define CPUID_7_0_EDX_SERIALIZE (1U << 14) 841 /* TSX Suspend Load Address Tracking instruction */ 842 #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) 843 /* AVX512_FP16 instruction */ 844 #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) 845 /* Speculation Control */ 846 #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) 847 /* Single Thread Indirect Branch Predictors */ 848 #define CPUID_7_0_EDX_STIBP (1U << 27) 849 /* Arch Capabilities */ 850 #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) 851 /* Core Capability */ 852 #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) 853 /* Speculative Store Bypass Disable */ 854 #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) 855 856 /* AVX VNNI Instruction */ 857 #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) 858 /* AVX512 BFloat16 Instruction */ 859 #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) 860 861 /* Packets which contain IP payload have LIP values */ 862 #define CPUID_14_0_ECX_LIP (1U << 31) 863 864 /* CLZERO instruction */ 865 #define CPUID_8000_0008_EBX_CLZERO (1U << 0) 866 /* Always save/restore FP error pointers */ 867 #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2) 868 /* Write back and do not invalidate cache */ 869 #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) 870 /* Indirect Branch Prediction Barrier */ 871 #define CPUID_8000_0008_EBX_IBPB (1U << 12) 872 /* Indirect Branch Restricted Speculation */ 873 #define CPUID_8000_0008_EBX_IBRS (1U << 14) 874 /* Single Thread Indirect Branch Predictors */ 875 #define CPUID_8000_0008_EBX_STIBP (1U << 15) 876 /* Speculative Store Bypass Disable */ 877 #define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24) 878 879 #define CPUID_XSAVE_XSAVEOPT (1U << 0) 880 #define CPUID_XSAVE_XSAVEC (1U << 1) 881 #define CPUID_XSAVE_XGETBV1 (1U << 2) 882 #define CPUID_XSAVE_XSAVES (1U << 3) 883 884 #define CPUID_6_EAX_ARAT (1U << 2) 885 886 /* CPUID[0x80000007].EDX flags: */ 887 #define CPUID_APM_INVTSC (1U << 8) 888 889 #define CPUID_VENDOR_SZ 12 890 891 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ 892 #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ 893 #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ 894 #define CPUID_VENDOR_INTEL "GenuineIntel" 895 896 #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ 897 #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ 898 #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ 899 #define CPUID_VENDOR_AMD "AuthenticAMD" 900 901 #define CPUID_VENDOR_VIA "CentaurHauls" 902 903 #define CPUID_VENDOR_HYGON "HygonGenuine" 904 905 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ 906 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ 907 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) 908 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ 909 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ 910 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) 911 912 #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ 913 #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ 914 915 /* CPUID[0xB].ECX level types */ 916 #define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8) 917 #define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8) 918 #define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8) 919 #define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8) 920 921 /* MSR Feature Bits */ 922 #define MSR_ARCH_CAP_RDCL_NO (1U << 0) 923 #define MSR_ARCH_CAP_IBRS_ALL (1U << 1) 924 #define MSR_ARCH_CAP_RSBA (1U << 2) 925 #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) 926 #define MSR_ARCH_CAP_SSB_NO (1U << 4) 927 #define MSR_ARCH_CAP_MDS_NO (1U << 5) 928 #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) 929 #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) 930 #define MSR_ARCH_CAP_TAA_NO (1U << 8) 931 932 #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) 933 934 /* VMX MSR features */ 935 #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull 936 #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32) 937 #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32) 938 #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49) 939 #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54) 940 #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55) 941 942 #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full 943 #define MSR_VMX_MISC_STORE_LMA (1ULL << 5) 944 #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6) 945 #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7) 946 #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8) 947 #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull 948 #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29) 949 #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30) 950 951 #define MSR_VMX_EPT_EXECONLY (1ULL << 0) 952 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6) 953 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7) 954 #define MSR_VMX_EPT_UC (1ULL << 8) 955 #define MSR_VMX_EPT_WB (1ULL << 14) 956 #define MSR_VMX_EPT_2MB (1ULL << 16) 957 #define MSR_VMX_EPT_1GB (1ULL << 17) 958 #define MSR_VMX_EPT_INVEPT (1ULL << 20) 959 #define MSR_VMX_EPT_AD_BITS (1ULL << 21) 960 #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22) 961 #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25) 962 #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26) 963 #define MSR_VMX_EPT_INVVPID (1ULL << 32) 964 #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40) 965 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41) 966 #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42) 967 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43) 968 969 #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0) 970 971 972 /* VMX controls */ 973 #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 974 #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008 975 #define VMX_CPU_BASED_HLT_EXITING 0x00000080 976 #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200 977 #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400 978 #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800 979 #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000 980 #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000 981 #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000 982 #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000 983 #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000 984 #define VMX_CPU_BASED_TPR_SHADOW 0x00200000 985 #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 986 #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000 987 #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000 988 #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000 989 #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 990 #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000 991 #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000 992 #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000 993 #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 994 995 #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 996 #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002 997 #define VMX_SECONDARY_EXEC_DESC 0x00000004 998 #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008 999 #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 1000 #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020 1001 #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040 1002 #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 1003 #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 1004 #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 1005 #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 1006 #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800 1007 #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 1008 #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 1009 #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000 1010 #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000 1011 #define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000 1012 #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000 1013 #define VMX_SECONDARY_EXEC_XSAVES 0x00100000 1014 #define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000 1015 1016 #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001 1017 #define VMX_PIN_BASED_NMI_EXITING 0x00000008 1018 #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020 1019 #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 1020 #define VMX_PIN_BASED_POSTED_INTR 0x00000080 1021 1022 #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 1023 #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 1024 #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 1025 #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 1026 #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000 1027 #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000 1028 #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000 1029 #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000 1030 #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 1031 #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000 1032 #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 1033 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 1034 #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000 1035 1036 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 1037 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 1038 #define VMX_VM_ENTRY_SMM 0x00000400 1039 #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 1040 #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 1041 #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000 1042 #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000 1043 #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000 1044 #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000 1045 #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 1046 #define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000 1047 1048 /* Supported Hyper-V Enlightenments */ 1049 #define HYPERV_FEAT_RELAXED 0 1050 #define HYPERV_FEAT_VAPIC 1 1051 #define HYPERV_FEAT_TIME 2 1052 #define HYPERV_FEAT_CRASH 3 1053 #define HYPERV_FEAT_RESET 4 1054 #define HYPERV_FEAT_VPINDEX 5 1055 #define HYPERV_FEAT_RUNTIME 6 1056 #define HYPERV_FEAT_SYNIC 7 1057 #define HYPERV_FEAT_STIMER 8 1058 #define HYPERV_FEAT_FREQUENCIES 9 1059 #define HYPERV_FEAT_REENLIGHTENMENT 10 1060 #define HYPERV_FEAT_TLBFLUSH 11 1061 #define HYPERV_FEAT_EVMCS 12 1062 #define HYPERV_FEAT_IPI 13 1063 #define HYPERV_FEAT_STIMER_DIRECT 14 1064 #define HYPERV_FEAT_AVIC 15 1065 1066 #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY 1067 #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF 1068 #endif 1069 1070 #define EXCP00_DIVZ 0 1071 #define EXCP01_DB 1 1072 #define EXCP02_NMI 2 1073 #define EXCP03_INT3 3 1074 #define EXCP04_INTO 4 1075 #define EXCP05_BOUND 5 1076 #define EXCP06_ILLOP 6 1077 #define EXCP07_PREX 7 1078 #define EXCP08_DBLE 8 1079 #define EXCP09_XERR 9 1080 #define EXCP0A_TSS 10 1081 #define EXCP0B_NOSEG 11 1082 #define EXCP0C_STACK 12 1083 #define EXCP0D_GPF 13 1084 #define EXCP0E_PAGE 14 1085 #define EXCP10_COPR 16 1086 #define EXCP11_ALGN 17 1087 #define EXCP12_MCHK 18 1088 1089 #define EXCP_VMEXIT 0x100 /* only for system emulation */ 1090 #define EXCP_SYSCALL 0x101 /* only for user emulation */ 1091 #define EXCP_VSYSCALL 0x102 /* only for user emulation */ 1092 1093 /* i386-specific interrupt pending bits. */ 1094 #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 1095 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 1096 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 1097 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 1098 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 1099 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 1100 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 1101 1102 /* Use a clearer name for this. */ 1103 #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET 1104 1105 /* Instead of computing the condition codes after each x86 instruction, 1106 * QEMU just stores one operand (called CC_SRC), the result 1107 * (called CC_DST) and the type of operation (called CC_OP). When the 1108 * condition codes are needed, the condition codes can be calculated 1109 * using this information. Condition codes are not generated if they 1110 * are only needed for conditional branches. 1111 */ 1112 typedef enum { 1113 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ 1114 CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ 1115 1116 CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ 1117 CC_OP_MULW, 1118 CC_OP_MULL, 1119 CC_OP_MULQ, 1120 1121 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1122 CC_OP_ADDW, 1123 CC_OP_ADDL, 1124 CC_OP_ADDQ, 1125 1126 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1127 CC_OP_ADCW, 1128 CC_OP_ADCL, 1129 CC_OP_ADCQ, 1130 1131 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1132 CC_OP_SUBW, 1133 CC_OP_SUBL, 1134 CC_OP_SUBQ, 1135 1136 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1137 CC_OP_SBBW, 1138 CC_OP_SBBL, 1139 CC_OP_SBBQ, 1140 1141 CC_OP_LOGICB, /* modify all flags, CC_DST = res */ 1142 CC_OP_LOGICW, 1143 CC_OP_LOGICL, 1144 CC_OP_LOGICQ, 1145 1146 CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1147 CC_OP_INCW, 1148 CC_OP_INCL, 1149 CC_OP_INCQ, 1150 1151 CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1152 CC_OP_DECW, 1153 CC_OP_DECL, 1154 CC_OP_DECQ, 1155 1156 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ 1157 CC_OP_SHLW, 1158 CC_OP_SHLL, 1159 CC_OP_SHLQ, 1160 1161 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ 1162 CC_OP_SARW, 1163 CC_OP_SARL, 1164 CC_OP_SARQ, 1165 1166 CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ 1167 CC_OP_BMILGW, 1168 CC_OP_BMILGL, 1169 CC_OP_BMILGQ, 1170 1171 CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ 1172 CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ 1173 CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ 1174 1175 CC_OP_CLR, /* Z set, all other flags clear. */ 1176 CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */ 1177 1178 CC_OP_NB, 1179 } CCOp; 1180 1181 typedef struct SegmentCache { 1182 uint32_t selector; 1183 target_ulong base; 1184 uint32_t limit; 1185 uint32_t flags; 1186 } SegmentCache; 1187 1188 #define MMREG_UNION(n, bits) \ 1189 union n { \ 1190 uint8_t _b_##n[(bits)/8]; \ 1191 uint16_t _w_##n[(bits)/16]; \ 1192 uint32_t _l_##n[(bits)/32]; \ 1193 uint64_t _q_##n[(bits)/64]; \ 1194 float32 _s_##n[(bits)/32]; \ 1195 float64 _d_##n[(bits)/64]; \ 1196 } 1197 1198 typedef union { 1199 uint8_t _b[16]; 1200 uint16_t _w[8]; 1201 uint32_t _l[4]; 1202 uint64_t _q[2]; 1203 } XMMReg; 1204 1205 typedef union { 1206 uint8_t _b[32]; 1207 uint16_t _w[16]; 1208 uint32_t _l[8]; 1209 uint64_t _q[4]; 1210 } YMMReg; 1211 1212 typedef MMREG_UNION(ZMMReg, 512) ZMMReg; 1213 typedef MMREG_UNION(MMXReg, 64) MMXReg; 1214 1215 typedef struct BNDReg { 1216 uint64_t lb; 1217 uint64_t ub; 1218 } BNDReg; 1219 1220 typedef struct BNDCSReg { 1221 uint64_t cfgu; 1222 uint64_t sts; 1223 } BNDCSReg; 1224 1225 #define BNDCFG_ENABLE 1ULL 1226 #define BNDCFG_BNDPRESERVE 2ULL 1227 #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK 1228 1229 #ifdef HOST_WORDS_BIGENDIAN 1230 #define ZMM_B(n) _b_ZMMReg[63 - (n)] 1231 #define ZMM_W(n) _w_ZMMReg[31 - (n)] 1232 #define ZMM_L(n) _l_ZMMReg[15 - (n)] 1233 #define ZMM_S(n) _s_ZMMReg[15 - (n)] 1234 #define ZMM_Q(n) _q_ZMMReg[7 - (n)] 1235 #define ZMM_D(n) _d_ZMMReg[7 - (n)] 1236 1237 #define MMX_B(n) _b_MMXReg[7 - (n)] 1238 #define MMX_W(n) _w_MMXReg[3 - (n)] 1239 #define MMX_L(n) _l_MMXReg[1 - (n)] 1240 #define MMX_S(n) _s_MMXReg[1 - (n)] 1241 #else 1242 #define ZMM_B(n) _b_ZMMReg[n] 1243 #define ZMM_W(n) _w_ZMMReg[n] 1244 #define ZMM_L(n) _l_ZMMReg[n] 1245 #define ZMM_S(n) _s_ZMMReg[n] 1246 #define ZMM_Q(n) _q_ZMMReg[n] 1247 #define ZMM_D(n) _d_ZMMReg[n] 1248 1249 #define MMX_B(n) _b_MMXReg[n] 1250 #define MMX_W(n) _w_MMXReg[n] 1251 #define MMX_L(n) _l_MMXReg[n] 1252 #define MMX_S(n) _s_MMXReg[n] 1253 #endif 1254 #define MMX_Q(n) _q_MMXReg[n] 1255 1256 typedef union { 1257 floatx80 d __attribute__((aligned(16))); 1258 MMXReg mmx; 1259 } FPReg; 1260 1261 typedef struct { 1262 uint64_t base; 1263 uint64_t mask; 1264 } MTRRVar; 1265 1266 #define CPU_NB_REGS64 16 1267 #define CPU_NB_REGS32 8 1268 1269 #ifdef TARGET_X86_64 1270 #define CPU_NB_REGS CPU_NB_REGS64 1271 #else 1272 #define CPU_NB_REGS CPU_NB_REGS32 1273 #endif 1274 1275 #define MAX_FIXED_COUNTERS 3 1276 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) 1277 1278 #define TARGET_INSN_START_EXTRA_WORDS 1 1279 1280 #define NB_OPMASK_REGS 8 1281 1282 /* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish 1283 * that APIC ID hasn't been set yet 1284 */ 1285 #define UNASSIGNED_APIC_ID 0xFFFFFFFF 1286 1287 typedef union X86LegacyXSaveArea { 1288 struct { 1289 uint16_t fcw; 1290 uint16_t fsw; 1291 uint8_t ftw; 1292 uint8_t reserved; 1293 uint16_t fpop; 1294 uint64_t fpip; 1295 uint64_t fpdp; 1296 uint32_t mxcsr; 1297 uint32_t mxcsr_mask; 1298 FPReg fpregs[8]; 1299 uint8_t xmm_regs[16][16]; 1300 }; 1301 uint8_t data[512]; 1302 } X86LegacyXSaveArea; 1303 1304 typedef struct X86XSaveHeader { 1305 uint64_t xstate_bv; 1306 uint64_t xcomp_bv; 1307 uint64_t reserve0; 1308 uint8_t reserved[40]; 1309 } X86XSaveHeader; 1310 1311 /* Ext. save area 2: AVX State */ 1312 typedef struct XSaveAVX { 1313 uint8_t ymmh[16][16]; 1314 } XSaveAVX; 1315 1316 /* Ext. save area 3: BNDREG */ 1317 typedef struct XSaveBNDREG { 1318 BNDReg bnd_regs[4]; 1319 } XSaveBNDREG; 1320 1321 /* Ext. save area 4: BNDCSR */ 1322 typedef union XSaveBNDCSR { 1323 BNDCSReg bndcsr; 1324 uint8_t data[64]; 1325 } XSaveBNDCSR; 1326 1327 /* Ext. save area 5: Opmask */ 1328 typedef struct XSaveOpmask { 1329 uint64_t opmask_regs[NB_OPMASK_REGS]; 1330 } XSaveOpmask; 1331 1332 /* Ext. save area 6: ZMM_Hi256 */ 1333 typedef struct XSaveZMM_Hi256 { 1334 uint8_t zmm_hi256[16][32]; 1335 } XSaveZMM_Hi256; 1336 1337 /* Ext. save area 7: Hi16_ZMM */ 1338 typedef struct XSaveHi16_ZMM { 1339 uint8_t hi16_zmm[16][64]; 1340 } XSaveHi16_ZMM; 1341 1342 /* Ext. save area 9: PKRU state */ 1343 typedef struct XSavePKRU { 1344 uint32_t pkru; 1345 uint32_t padding; 1346 } XSavePKRU; 1347 1348 QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); 1349 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); 1350 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); 1351 QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); 1352 QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); 1353 QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); 1354 QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); 1355 1356 typedef struct ExtSaveArea { 1357 uint32_t feature, bits; 1358 uint32_t offset, size; 1359 } ExtSaveArea; 1360 1361 #define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1) 1362 1363 extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT]; 1364 1365 typedef enum TPRAccess { 1366 TPR_ACCESS_READ, 1367 TPR_ACCESS_WRITE, 1368 } TPRAccess; 1369 1370 /* Cache information data structures: */ 1371 1372 enum CacheType { 1373 DATA_CACHE, 1374 INSTRUCTION_CACHE, 1375 UNIFIED_CACHE 1376 }; 1377 1378 typedef struct CPUCacheInfo { 1379 enum CacheType type; 1380 uint8_t level; 1381 /* Size in bytes */ 1382 uint32_t size; 1383 /* Line size, in bytes */ 1384 uint16_t line_size; 1385 /* 1386 * Associativity. 1387 * Note: representation of fully-associative caches is not implemented 1388 */ 1389 uint8_t associativity; 1390 /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ 1391 uint8_t partitions; 1392 /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ 1393 uint32_t sets; 1394 /* 1395 * Lines per tag. 1396 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. 1397 * (Is this synonym to @partitions?) 1398 */ 1399 uint8_t lines_per_tag; 1400 1401 /* Self-initializing cache */ 1402 bool self_init; 1403 /* 1404 * WBINVD/INVD is not guaranteed to act upon lower level caches of 1405 * non-originating threads sharing this cache. 1406 * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] 1407 */ 1408 bool no_invd_sharing; 1409 /* 1410 * Cache is inclusive of lower cache levels. 1411 * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. 1412 */ 1413 bool inclusive; 1414 /* 1415 * A complex function is used to index the cache, potentially using all 1416 * address bits. CPUID[4].EDX[bit 2]. 1417 */ 1418 bool complex_indexing; 1419 } CPUCacheInfo; 1420 1421 1422 typedef struct CPUCaches { 1423 CPUCacheInfo *l1d_cache; 1424 CPUCacheInfo *l1i_cache; 1425 CPUCacheInfo *l2_cache; 1426 CPUCacheInfo *l3_cache; 1427 } CPUCaches; 1428 1429 typedef struct HVFX86LazyFlags { 1430 target_ulong result; 1431 target_ulong auxbits; 1432 } HVFX86LazyFlags; 1433 1434 typedef struct CPUArchState { 1435 /* standard registers */ 1436 target_ulong regs[CPU_NB_REGS]; 1437 target_ulong eip; 1438 target_ulong eflags; /* eflags register. During CPU emulation, CC 1439 flags and DF are set to zero because they are 1440 stored elsewhere */ 1441 1442 /* emulator internal eflags handling */ 1443 target_ulong cc_dst; 1444 target_ulong cc_src; 1445 target_ulong cc_src2; 1446 uint32_t cc_op; 1447 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ 1448 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags 1449 are known at translation time. */ 1450 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ 1451 1452 /* segments */ 1453 SegmentCache segs[6]; /* selector values */ 1454 SegmentCache ldt; 1455 SegmentCache tr; 1456 SegmentCache gdt; /* only base and limit are used */ 1457 SegmentCache idt; /* only base and limit are used */ 1458 1459 target_ulong cr[5]; /* NOTE: cr1 is unused */ 1460 1461 bool pdptrs_valid; 1462 uint64_t pdptrs[4]; 1463 int32_t a20_mask; 1464 1465 BNDReg bnd_regs[4]; 1466 BNDCSReg bndcs_regs; 1467 uint64_t msr_bndcfgs; 1468 uint64_t efer; 1469 1470 /* Beginning of state preserved by INIT (dummy marker). */ 1471 struct {} start_init_save; 1472 1473 /* FPU state */ 1474 unsigned int fpstt; /* top of stack index */ 1475 uint16_t fpus; 1476 uint16_t fpuc; 1477 uint8_t fptags[8]; /* 0 = valid, 1 = empty */ 1478 FPReg fpregs[8]; 1479 /* KVM-only so far */ 1480 uint16_t fpop; 1481 uint16_t fpcs; 1482 uint16_t fpds; 1483 uint64_t fpip; 1484 uint64_t fpdp; 1485 1486 /* emulator internal variables */ 1487 float_status fp_status; 1488 floatx80 ft0; 1489 1490 float_status mmx_status; /* for 3DNow! float ops */ 1491 float_status sse_status; 1492 uint32_t mxcsr; 1493 ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32]; 1494 ZMMReg xmm_t0; 1495 MMXReg mmx_t0; 1496 1497 XMMReg ymmh_regs[CPU_NB_REGS]; 1498 1499 uint64_t opmask_regs[NB_OPMASK_REGS]; 1500 YMMReg zmmh_regs[CPU_NB_REGS]; 1501 ZMMReg hi16_zmm_regs[CPU_NB_REGS]; 1502 1503 /* sysenter registers */ 1504 uint32_t sysenter_cs; 1505 target_ulong sysenter_esp; 1506 target_ulong sysenter_eip; 1507 uint64_t star; 1508 1509 uint64_t vm_hsave; 1510 1511 #ifdef TARGET_X86_64 1512 target_ulong lstar; 1513 target_ulong cstar; 1514 target_ulong fmask; 1515 target_ulong kernelgsbase; 1516 #endif 1517 1518 uint64_t tsc; 1519 uint64_t tsc_adjust; 1520 uint64_t tsc_deadline; 1521 uint64_t tsc_aux; 1522 1523 uint64_t xcr0; 1524 1525 uint64_t mcg_status; 1526 uint64_t msr_ia32_misc_enable; 1527 uint64_t msr_ia32_feature_control; 1528 uint64_t msr_ia32_sgxlepubkeyhash[4]; 1529 1530 uint64_t msr_fixed_ctr_ctrl; 1531 uint64_t msr_global_ctrl; 1532 uint64_t msr_global_status; 1533 uint64_t msr_global_ovf_ctrl; 1534 uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; 1535 uint64_t msr_gp_counters[MAX_GP_COUNTERS]; 1536 uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; 1537 1538 uint64_t pat; 1539 uint32_t smbase; 1540 uint64_t msr_smi_count; 1541 1542 uint32_t pkru; 1543 uint32_t pkrs; 1544 uint32_t tsx_ctrl; 1545 1546 uint64_t spec_ctrl; 1547 uint64_t amd_tsc_scale_msr; 1548 uint64_t virt_ssbd; 1549 1550 /* End of state preserved by INIT (dummy marker). */ 1551 struct {} end_init_save; 1552 1553 uint64_t system_time_msr; 1554 uint64_t wall_clock_msr; 1555 uint64_t steal_time_msr; 1556 uint64_t async_pf_en_msr; 1557 uint64_t async_pf_int_msr; 1558 uint64_t pv_eoi_en_msr; 1559 uint64_t poll_control_msr; 1560 1561 /* Partition-wide HV MSRs, will be updated only on the first vcpu */ 1562 uint64_t msr_hv_hypercall; 1563 uint64_t msr_hv_guest_os_id; 1564 uint64_t msr_hv_tsc; 1565 1566 /* Per-VCPU HV MSRs */ 1567 uint64_t msr_hv_vapic; 1568 uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; 1569 uint64_t msr_hv_runtime; 1570 uint64_t msr_hv_synic_control; 1571 uint64_t msr_hv_synic_evt_page; 1572 uint64_t msr_hv_synic_msg_page; 1573 uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; 1574 uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; 1575 uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; 1576 uint64_t msr_hv_reenlightenment_control; 1577 uint64_t msr_hv_tsc_emulation_control; 1578 uint64_t msr_hv_tsc_emulation_status; 1579 1580 uint64_t msr_rtit_ctrl; 1581 uint64_t msr_rtit_status; 1582 uint64_t msr_rtit_output_base; 1583 uint64_t msr_rtit_output_mask; 1584 uint64_t msr_rtit_cr3_match; 1585 uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; 1586 1587 /* exception/interrupt handling */ 1588 int error_code; 1589 int exception_is_int; 1590 target_ulong exception_next_eip; 1591 target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ 1592 union { 1593 struct CPUBreakpoint *cpu_breakpoint[4]; 1594 struct CPUWatchpoint *cpu_watchpoint[4]; 1595 }; /* break/watchpoints for dr[0..3] */ 1596 int old_exception; /* exception in flight */ 1597 1598 uint64_t vm_vmcb; 1599 uint64_t tsc_offset; 1600 uint64_t intercept; 1601 uint16_t intercept_cr_read; 1602 uint16_t intercept_cr_write; 1603 uint16_t intercept_dr_read; 1604 uint16_t intercept_dr_write; 1605 uint32_t intercept_exceptions; 1606 uint64_t nested_cr3; 1607 uint32_t nested_pg_mode; 1608 uint8_t v_tpr; 1609 uint32_t int_ctl; 1610 1611 /* KVM states, automatically cleared on reset */ 1612 uint8_t nmi_injected; 1613 uint8_t nmi_pending; 1614 1615 uintptr_t retaddr; 1616 1617 /* Fields up to this point are cleared by a CPU reset */ 1618 struct {} end_reset_fields; 1619 1620 /* Fields after this point are preserved across CPU reset. */ 1621 1622 /* processor features (e.g. for CPUID insn) */ 1623 /* Minimum cpuid leaf 7 value */ 1624 uint32_t cpuid_level_func7; 1625 /* Actual cpuid leaf 7 value */ 1626 uint32_t cpuid_min_level_func7; 1627 /* Minimum level/xlevel/xlevel2, based on CPU model + features */ 1628 uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; 1629 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ 1630 uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; 1631 /* Actual level/xlevel/xlevel2 value: */ 1632 uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; 1633 uint32_t cpuid_vendor1; 1634 uint32_t cpuid_vendor2; 1635 uint32_t cpuid_vendor3; 1636 uint32_t cpuid_version; 1637 FeatureWordArray features; 1638 /* Features that were explicitly enabled/disabled */ 1639 FeatureWordArray user_features; 1640 uint32_t cpuid_model[12]; 1641 /* Cache information for CPUID. When legacy-cache=on, the cache data 1642 * on each CPUID leaf will be different, because we keep compatibility 1643 * with old QEMU versions. 1644 */ 1645 CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; 1646 1647 /* MTRRs */ 1648 uint64_t mtrr_fixed[11]; 1649 uint64_t mtrr_deftype; 1650 MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; 1651 1652 /* For KVM */ 1653 uint32_t mp_state; 1654 int32_t exception_nr; 1655 int32_t interrupt_injected; 1656 uint8_t soft_interrupt; 1657 uint8_t exception_pending; 1658 uint8_t exception_injected; 1659 uint8_t has_error_code; 1660 uint8_t exception_has_payload; 1661 uint64_t exception_payload; 1662 uint32_t ins_len; 1663 uint32_t sipi_vector; 1664 bool tsc_valid; 1665 int64_t tsc_khz; 1666 int64_t user_tsc_khz; /* for sanity check only */ 1667 uint64_t apic_bus_freq; 1668 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 1669 void *xsave_buf; 1670 uint32_t xsave_buf_len; 1671 #endif 1672 #if defined(CONFIG_KVM) 1673 struct kvm_nested_state *nested_state; 1674 #endif 1675 #if defined(CONFIG_HVF) 1676 HVFX86LazyFlags hvf_lflags; 1677 void *hvf_mmio_buf; 1678 #endif 1679 1680 uint64_t mcg_cap; 1681 uint64_t mcg_ctl; 1682 uint64_t mcg_ext_ctl; 1683 uint64_t mce_banks[MCE_BANKS_DEF*4]; 1684 uint64_t xstate_bv; 1685 1686 /* vmstate */ 1687 uint16_t fpus_vmstate; 1688 uint16_t fptag_vmstate; 1689 uint16_t fpregs_format_vmstate; 1690 1691 uint64_t xss; 1692 uint32_t umwait; 1693 1694 TPRAccess tpr_access_type; 1695 1696 unsigned nr_dies; 1697 } CPUX86State; 1698 1699 struct kvm_msrs; 1700 1701 /** 1702 * X86CPU: 1703 * @env: #CPUX86State 1704 * @migratable: If set, only migratable flags will be accepted when "enforce" 1705 * mode is used, and only migratable flags will be included in the "host" 1706 * CPU model. 1707 * 1708 * An x86 CPU. 1709 */ 1710 struct ArchCPU { 1711 /*< private >*/ 1712 CPUState parent_obj; 1713 /*< public >*/ 1714 1715 CPUNegativeOffsetState neg; 1716 CPUX86State env; 1717 VMChangeStateEntry *vmsentry; 1718 1719 uint64_t ucode_rev; 1720 1721 uint32_t hyperv_spinlock_attempts; 1722 char *hyperv_vendor; 1723 bool hyperv_synic_kvm_only; 1724 uint64_t hyperv_features; 1725 bool hyperv_passthrough; 1726 OnOffAuto hyperv_no_nonarch_cs; 1727 uint32_t hyperv_vendor_id[3]; 1728 uint32_t hyperv_interface_id[4]; 1729 uint32_t hyperv_limits[3]; 1730 uint32_t hyperv_nested[4]; 1731 bool hyperv_enforce_cpuid; 1732 uint32_t hyperv_ver_id_build; 1733 uint16_t hyperv_ver_id_major; 1734 uint16_t hyperv_ver_id_minor; 1735 uint32_t hyperv_ver_id_sp; 1736 uint8_t hyperv_ver_id_sb; 1737 uint32_t hyperv_ver_id_sn; 1738 1739 bool check_cpuid; 1740 bool enforce_cpuid; 1741 /* 1742 * Force features to be enabled even if the host doesn't support them. 1743 * This is dangerous and should be done only for testing CPUID 1744 * compatibility. 1745 */ 1746 bool force_features; 1747 bool expose_kvm; 1748 bool expose_tcg; 1749 bool migratable; 1750 bool migrate_smi_count; 1751 bool max_features; /* Enable all supported features automatically */ 1752 uint32_t apic_id; 1753 1754 /* Enables publishing of TSC increment and Local APIC bus frequencies to 1755 * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */ 1756 bool vmware_cpuid_freq; 1757 1758 /* if true the CPUID code directly forward host cache leaves to the guest */ 1759 bool cache_info_passthrough; 1760 1761 /* if true the CPUID code directly forwards 1762 * host monitor/mwait leaves to the guest */ 1763 struct { 1764 uint32_t eax; 1765 uint32_t ebx; 1766 uint32_t ecx; 1767 uint32_t edx; 1768 } mwait; 1769 1770 /* Features that were filtered out because of missing host capabilities */ 1771 FeatureWordArray filtered_features; 1772 1773 /* Enable PMU CPUID bits. This can't be enabled by default yet because 1774 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID 1775 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel 1776 * capabilities) directly to the guest. 1777 */ 1778 bool enable_pmu; 1779 1780 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is 1781 * disabled by default to avoid breaking migration between QEMU with 1782 * different LMCE configurations. 1783 */ 1784 bool enable_lmce; 1785 1786 /* Compatibility bits for old machine types. 1787 * If true present virtual l3 cache for VM, the vcpus in the same virtual 1788 * socket share an virtual l3 cache. 1789 */ 1790 bool enable_l3_cache; 1791 1792 /* Compatibility bits for old machine types. 1793 * If true present the old cache topology information 1794 */ 1795 bool legacy_cache; 1796 1797 /* Compatibility bits for old machine types: */ 1798 bool enable_cpuid_0xb; 1799 1800 /* Enable auto level-increase for all CPUID leaves */ 1801 bool full_cpuid_auto_level; 1802 1803 /* Only advertise CPUID leaves defined by the vendor */ 1804 bool vendor_cpuid_only; 1805 1806 /* Enable auto level-increase for Intel Processor Trace leave */ 1807 bool intel_pt_auto_level; 1808 1809 /* if true fill the top bits of the MTRR_PHYSMASKn variable range */ 1810 bool fill_mtrr_mask; 1811 1812 /* if true override the phys_bits value with a value read from the host */ 1813 bool host_phys_bits; 1814 1815 /* if set, limit maximum value for phys_bits when host_phys_bits is true */ 1816 uint8_t host_phys_bits_limit; 1817 1818 /* Stop SMI delivery for migration compatibility with old machines */ 1819 bool kvm_no_smi_migration; 1820 1821 /* Forcefully disable KVM PV features not exposed in guest CPUIDs */ 1822 bool kvm_pv_enforce_cpuid; 1823 1824 /* Number of physical address bits supported */ 1825 uint32_t phys_bits; 1826 1827 /* in order to simplify APIC support, we leave this pointer to the 1828 user */ 1829 struct DeviceState *apic_state; 1830 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; 1831 Notifier machine_done; 1832 1833 struct kvm_msrs *kvm_msr_buf; 1834 1835 int32_t node_id; /* NUMA node this CPU belongs to */ 1836 int32_t socket_id; 1837 int32_t die_id; 1838 int32_t core_id; 1839 int32_t thread_id; 1840 1841 int32_t hv_max_vps; 1842 }; 1843 1844 1845 #ifndef CONFIG_USER_ONLY 1846 extern const VMStateDescription vmstate_x86_cpu; 1847 #endif 1848 1849 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); 1850 1851 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, 1852 int cpuid, void *opaque); 1853 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, 1854 int cpuid, void *opaque); 1855 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 1856 void *opaque); 1857 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 1858 void *opaque); 1859 1860 void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, 1861 Error **errp); 1862 1863 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); 1864 1865 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, 1866 MemTxAttrs *attrs); 1867 1868 int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); 1869 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 1870 1871 void x86_cpu_list(void); 1872 int cpu_x86_support_mca_broadcast(CPUX86State *env); 1873 1874 #ifndef CONFIG_USER_ONLY 1875 int cpu_get_pic_interrupt(CPUX86State *s); 1876 1877 /* MSDOS compatibility mode FPU exception support */ 1878 void x86_register_ferr_irq(qemu_irq irq); 1879 void fpu_check_raise_ferr_irq(CPUX86State *s); 1880 void cpu_set_ignne(void); 1881 void cpu_clear_ignne(void); 1882 #endif 1883 1884 /* mpx_helper.c */ 1885 void cpu_sync_bndcs_hflags(CPUX86State *env); 1886 1887 /* this function must always be used to load data in the segment 1888 cache: it synchronizes the hflags with the segment cache values */ 1889 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 1890 X86Seg seg_reg, unsigned int selector, 1891 target_ulong base, 1892 unsigned int limit, 1893 unsigned int flags) 1894 { 1895 SegmentCache *sc; 1896 unsigned int new_hflags; 1897 1898 sc = &env->segs[seg_reg]; 1899 sc->selector = selector; 1900 sc->base = base; 1901 sc->limit = limit; 1902 sc->flags = flags; 1903 1904 /* update the hidden flags */ 1905 { 1906 if (seg_reg == R_CS) { 1907 #ifdef TARGET_X86_64 1908 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { 1909 /* long mode */ 1910 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 1911 env->hflags &= ~(HF_ADDSEG_MASK); 1912 } else 1913 #endif 1914 { 1915 /* legacy / compatibility case */ 1916 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) 1917 >> (DESC_B_SHIFT - HF_CS32_SHIFT); 1918 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | 1919 new_hflags; 1920 } 1921 } 1922 if (seg_reg == R_SS) { 1923 int cpl = (flags >> DESC_DPL_SHIFT) & 3; 1924 #if HF_CPL_MASK != 3 1925 #error HF_CPL_MASK is hardcoded 1926 #endif 1927 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; 1928 /* Possibly switch between BNDCFGS and BNDCFGU */ 1929 cpu_sync_bndcs_hflags(env); 1930 } 1931 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) 1932 >> (DESC_B_SHIFT - HF_SS32_SHIFT); 1933 if (env->hflags & HF_CS64_MASK) { 1934 /* zero base assumed for DS, ES and SS in long mode */ 1935 } else if (!(env->cr[0] & CR0_PE_MASK) || 1936 (env->eflags & VM_MASK) || 1937 !(env->hflags & HF_CS32_MASK)) { 1938 /* XXX: try to avoid this test. The problem comes from the 1939 fact that is real mode or vm86 mode we only modify the 1940 'base' and 'selector' fields of the segment cache to go 1941 faster. A solution may be to force addseg to one in 1942 translate-i386.c. */ 1943 new_hflags |= HF_ADDSEG_MASK; 1944 } else { 1945 new_hflags |= ((env->segs[R_DS].base | 1946 env->segs[R_ES].base | 1947 env->segs[R_SS].base) != 0) << 1948 HF_ADDSEG_SHIFT; 1949 } 1950 env->hflags = (env->hflags & 1951 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; 1952 } 1953 } 1954 1955 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, 1956 uint8_t sipi_vector) 1957 { 1958 CPUState *cs = CPU(cpu); 1959 CPUX86State *env = &cpu->env; 1960 1961 env->eip = 0; 1962 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, 1963 sipi_vector << 12, 1964 env->segs[R_CS].limit, 1965 env->segs[R_CS].flags); 1966 cs->halted = 0; 1967 } 1968 1969 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 1970 target_ulong *base, unsigned int *limit, 1971 unsigned int *flags); 1972 1973 /* op_helper.c */ 1974 /* used for debug or cpu save/restore */ 1975 1976 /* cpu-exec.c */ 1977 /* the following helpers are only usable in user mode simulation as 1978 they can trigger unexpected exceptions */ 1979 void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector); 1980 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); 1981 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); 1982 void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); 1983 void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); 1984 1985 /* cpu.c */ 1986 void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 1987 uint32_t vendor2, uint32_t vendor3); 1988 typedef struct PropValue { 1989 const char *prop, *value; 1990 } PropValue; 1991 void x86_cpu_apply_props(X86CPU *cpu, PropValue *props); 1992 1993 uint32_t cpu_x86_virtual_addr_width(CPUX86State *env); 1994 1995 /* cpu.c other functions (cpuid) */ 1996 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 1997 uint32_t *eax, uint32_t *ebx, 1998 uint32_t *ecx, uint32_t *edx); 1999 void cpu_clear_apic_feature(CPUX86State *env); 2000 void host_cpuid(uint32_t function, uint32_t count, 2001 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); 2002 2003 /* helper.c */ 2004 void x86_cpu_set_a20(X86CPU *cpu, int a20_state); 2005 2006 #ifndef CONFIG_USER_ONLY 2007 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) 2008 { 2009 return !!attrs.secure; 2010 } 2011 2012 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) 2013 { 2014 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); 2015 } 2016 2017 /* 2018 * load efer and update the corresponding hflags. XXX: do consistency 2019 * checks with cpuid bits? 2020 */ 2021 void cpu_load_efer(CPUX86State *env, uint64_t val); 2022 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); 2023 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); 2024 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); 2025 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); 2026 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); 2027 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); 2028 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); 2029 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); 2030 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); 2031 #endif 2032 2033 /* will be suppressed */ 2034 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 2035 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 2036 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 2037 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); 2038 2039 /* hw/pc.c */ 2040 uint64_t cpu_get_tsc(CPUX86State *env); 2041 2042 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU 2043 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) 2044 #define CPU_RESOLVING_TYPE TYPE_X86_CPU 2045 2046 #ifdef TARGET_X86_64 2047 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") 2048 #else 2049 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") 2050 #endif 2051 2052 #define cpu_list x86_cpu_list 2053 2054 /* MMU modes definitions */ 2055 #define MMU_KSMAP_IDX 0 2056 #define MMU_USER_IDX 1 2057 #define MMU_KNOSMAP_IDX 2 2058 static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) 2059 { 2060 return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : 2061 (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) 2062 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; 2063 } 2064 2065 static inline int cpu_mmu_index_kernel(CPUX86State *env) 2066 { 2067 return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : 2068 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) 2069 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; 2070 } 2071 2072 #define CC_DST (env->cc_dst) 2073 #define CC_SRC (env->cc_src) 2074 #define CC_SRC2 (env->cc_src2) 2075 #define CC_OP (env->cc_op) 2076 2077 #include "exec/cpu-all.h" 2078 #include "svm.h" 2079 2080 #if !defined(CONFIG_USER_ONLY) 2081 #include "hw/i386/apic.h" 2082 #endif 2083 2084 static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, 2085 target_ulong *cs_base, uint32_t *flags) 2086 { 2087 *cs_base = env->segs[R_CS].base; 2088 *pc = *cs_base + env->eip; 2089 *flags = env->hflags | 2090 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); 2091 } 2092 2093 void do_cpu_init(X86CPU *cpu); 2094 void do_cpu_sipi(X86CPU *cpu); 2095 2096 #define MCE_INJECT_BROADCAST 1 2097 #define MCE_INJECT_UNCOND_AO 2 2098 2099 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, 2100 uint64_t status, uint64_t mcg_status, uint64_t addr, 2101 uint64_t misc, int flags); 2102 2103 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); 2104 2105 static inline uint32_t cpu_compute_eflags(CPUX86State *env) 2106 { 2107 uint32_t eflags = env->eflags; 2108 if (tcg_enabled()) { 2109 eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); 2110 } 2111 return eflags; 2112 } 2113 2114 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) 2115 { 2116 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); 2117 } 2118 2119 static inline int32_t x86_get_a20_mask(CPUX86State *env) 2120 { 2121 if (env->hflags & HF_SMM_MASK) { 2122 return -1; 2123 } else { 2124 return env->a20_mask; 2125 } 2126 } 2127 2128 static inline bool cpu_has_vmx(CPUX86State *env) 2129 { 2130 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; 2131 } 2132 2133 static inline bool cpu_has_svm(CPUX86State *env) 2134 { 2135 return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM; 2136 } 2137 2138 /* 2139 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set. 2140 * Since it was set, CR4.VMXE must remain set as long as vCPU is in 2141 * VMX operation. This is because CR4.VMXE is one of the bits set 2142 * in MSR_IA32_VMX_CR4_FIXED1. 2143 * 2144 * There is one exception to above statement when vCPU enters SMM mode. 2145 * When a vCPU enters SMM mode, it temporarily exit VMX operation and 2146 * may also reset CR4.VMXE during execution in SMM mode. 2147 * When vCPU exits SMM mode, vCPU state is restored to be in VMX operation 2148 * and CR4.VMXE is restored to it's original value of being set. 2149 * 2150 * Therefore, when vCPU is not in SMM mode, we can infer whether 2151 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot 2152 * know for certain. 2153 */ 2154 static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) 2155 { 2156 return cpu_has_vmx(env) && 2157 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); 2158 } 2159 2160 /* excp_helper.c */ 2161 int get_pg_mode(CPUX86State *env); 2162 2163 /* fpu_helper.c */ 2164 void update_fp_status(CPUX86State *env); 2165 void update_mxcsr_status(CPUX86State *env); 2166 void update_mxcsr_from_sse_status(CPUX86State *env); 2167 2168 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) 2169 { 2170 env->mxcsr = mxcsr; 2171 if (tcg_enabled()) { 2172 update_mxcsr_status(env); 2173 } 2174 } 2175 2176 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) 2177 { 2178 env->fpuc = fpuc; 2179 if (tcg_enabled()) { 2180 update_fp_status(env); 2181 } 2182 } 2183 2184 /* mem_helper.c */ 2185 void helper_lock_init(void); 2186 2187 /* svm_helper.c */ 2188 #ifdef CONFIG_USER_ONLY 2189 static inline void 2190 cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2191 uint64_t param, uintptr_t retaddr) 2192 { /* no-op */ } 2193 static inline bool 2194 cpu_svm_has_intercept(CPUX86State *env, uint32_t type) 2195 { return false; } 2196 #else 2197 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2198 uint64_t param, uintptr_t retaddr); 2199 bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type); 2200 #endif 2201 2202 /* apic.c */ 2203 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); 2204 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, 2205 TPRAccess access); 2206 2207 /* Special values for X86CPUVersion: */ 2208 2209 /* Resolve to latest CPU version */ 2210 #define CPU_VERSION_LATEST -1 2211 2212 /* 2213 * Resolve to version defined by current machine type. 2214 * See x86_cpu_set_default_version() 2215 */ 2216 #define CPU_VERSION_AUTO -2 2217 2218 /* Don't resolve to any versioned CPU models, like old QEMU versions */ 2219 #define CPU_VERSION_LEGACY 0 2220 2221 typedef int X86CPUVersion; 2222 2223 /* 2224 * Set default CPU model version for CPU models having 2225 * version == CPU_VERSION_AUTO. 2226 */ 2227 void x86_cpu_set_default_version(X86CPUVersion version); 2228 2229 #define APIC_DEFAULT_ADDRESS 0xfee00000 2230 #define APIC_SPACE_SIZE 0x100000 2231 2232 /* cpu-dump.c */ 2233 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags); 2234 2235 /* cpu.c */ 2236 bool cpu_is_bsp(X86CPU *cpu); 2237 2238 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen); 2239 void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen); 2240 void x86_update_hflags(CPUX86State* env); 2241 2242 static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat) 2243 { 2244 return !!(cpu->hyperv_features & BIT(feat)); 2245 } 2246 2247 static inline uint64_t cr4_reserved_bits(CPUX86State *env) 2248 { 2249 uint64_t reserved_bits = CR4_RESERVED_MASK; 2250 if (!env->features[FEAT_XSAVE]) { 2251 reserved_bits |= CR4_OSXSAVE_MASK; 2252 } 2253 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) { 2254 reserved_bits |= CR4_SMEP_MASK; 2255 } 2256 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { 2257 reserved_bits |= CR4_SMAP_MASK; 2258 } 2259 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) { 2260 reserved_bits |= CR4_FSGSBASE_MASK; 2261 } 2262 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { 2263 reserved_bits |= CR4_PKE_MASK; 2264 } 2265 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) { 2266 reserved_bits |= CR4_LA57_MASK; 2267 } 2268 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) { 2269 reserved_bits |= CR4_UMIP_MASK; 2270 } 2271 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) { 2272 reserved_bits |= CR4_PKS_MASK; 2273 } 2274 return reserved_bits; 2275 } 2276 2277 static inline bool ctl_has_irq(CPUX86State *env) 2278 { 2279 uint32_t int_prio; 2280 uint32_t tpr; 2281 2282 int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT; 2283 tpr = env->int_ctl & V_TPR_MASK; 2284 2285 if (env->int_ctl & V_IGN_TPR_MASK) { 2286 return (env->int_ctl & V_IRQ_MASK); 2287 } 2288 2289 return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr); 2290 } 2291 2292 hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type, 2293 int *prot); 2294 #if defined(TARGET_X86_64) && \ 2295 defined(CONFIG_USER_ONLY) && \ 2296 defined(CONFIG_LINUX) 2297 # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20) 2298 #endif 2299 2300 #endif /* I386_CPU_H */ 2301