/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H

#include "sysemu/tcg.h"
#include "cpu-qom.h"
#include "hyperv-proto.h"
#include "exec/cpu-defs.h"

/* The x86 has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO      (TCG_MO_ALL & ~TCG_MO_ST_LD)

/* Maximum instruction code size */
#define TARGET_MAX_INSN_SIZE 16

/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#ifdef TARGET_X86_64
#define I386_ELF_MACHINE  EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
#define I386_ELF_MACHINE  EM_386
#define ELF_MACHINE_UNAME "i686"
#endif

enum {
    R_EAX = 0,
    R_ECX = 1,
    R_EDX = 2,
    R_EBX = 3,
    R_ESP = 4,
    R_EBP = 5,
    R_ESI = 6,
    R_EDI = 7,
    R_R8 = 8,
    R_R9 = 9,
    R_R10 = 10,
    R_R11 = 11,
    R_R12 = 12,
    R_R13 = 13,
    R_R14 = 14,
    R_R15 = 15,

    R_AL = 0,
    R_CL = 1,
    R_DL = 2,
    R_BL = 3,
    R_AH = 4,
    R_CH = 5,
    R_DH = 6,
    R_BH = 7,
};
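
/*
 * Illustrative sketch (not part of the upstream header): the R_* indices
 * above select entries of the general-purpose register file, declared
 * further down as "target_ulong regs[CPU_NB_REGS]" inside CPUX86State:
 *
 *     target_ulong rax = env->regs[R_EAX];
 *     env->regs[R_ESP] -= 8;
 *
 * The R_AL..R_BH values are byte-register encodings used by the decoder;
 * they do not name separate storage.
 */
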
typedef enum X86Seg {
    R_ES = 0,
    R_CS = 1,
    R_SS = 2,
    R_DS = 3,
    R_FS = 4,
    R_GS = 5,
    R_LDTR = 6,
    R_TR = 7,
} X86Seg;

/* segment descriptor fields */
#define DESC_G_SHIFT    23
#define DESC_G_MASK     (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_SHIFT  20
#define DESC_AVL_MASK   (1 << DESC_AVL_SHIFT)
#define DESC_P_SHIFT    15
#define DESC_P_MASK     (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_SHIFT    12
#define DESC_S_MASK     (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT    8
#define IOPL_SHIFT  12
#define VM_SHIFT    17

#define TF_MASK     0x00000100
#define IF_MASK     0x00000200
#define DF_MASK     0x00000400
#define IOPL_MASK   0x00003000
#define NT_MASK     0x00004000
#define RF_MASK     0x00010000
#define VM_MASK     0x00020000
#define AC_MASK     0x00040000
#define VIF_MASK    0x00080000
#define VIP_MASK    0x00100000
#define ID_MASK     0x00200000
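
/*
 * Example (sketch, not upstream code): the DESC_* and EFLAGS masks above
 * are used by simple shift-and-mask extraction, e.g.
 *
 *     int iopl = (env->eflags & IOPL_MASK) >> IOPL_SHIFT;
 *     int dpl  = (env->segs[R_CS].flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
 *
 * where segs[] is the array of SegmentCache structures defined later in
 * this header.
 */
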
/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and GUEST flags are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_AC_SHIFT         18 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_GUEST_SHIFT      21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT       23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT      24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT     25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT     26 /* BND registers in-use */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_RF_MASK           (1 << HF_RF_SHIFT)
#define HF_VM_MASK           (1 << HF_VM_SHIFT)
#define HF_AC_MASK           (1 << HF_AC_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
#define HF_GUEST_MASK        (1 << HF_GUEST_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK         (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK        (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK       (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK       (1 << HF_MPX_IU_SHIFT)
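
/*
 * Example (sketch, not upstream code): because hflags is a pre-digested
 * summary, the translator and helpers test single bits instead of
 * re-deriving state from segment and control registers:
 *
 *     int cpl     = env->hflags & HF_CPL_MASK;
 *     bool code64 = (env->hflags & HF_CS64_MASK) != 0;
 *     bool in_smm = (env->hflags & HF_SMM_MASK) != 0;
 *
 * HF_CPL_SHIFT is 0, so no shift is needed for the CPL.
 */
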
/* hflags2 */

#define HF2_GIF_SHIFT            0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT            1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT            2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT          3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT         5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT            6 /* Nested Paging enabled */

#define HF2_GIF_MASK            (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK            (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK          (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK         (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK            (1 << HF2_NPT_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1U << 0)
#define CR0_MP_MASK  (1U << 1)
#define CR0_EM_MASK  (1U << 2)
#define CR0_TS_MASK  (1U << 3)
#define CR0_ET_MASK  (1U << 4)
#define CR0_NE_MASK  (1U << 5)
#define CR0_WP_MASK  (1U << 16)
#define CR0_AM_MASK  (1U << 18)
#define CR0_PG_MASK  (1U << 31)
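
/*
 * Example (sketch, not upstream code): mode checks combine these CR0 bits,
 * e.g. paged protected mode is
 *
 *     (env->cr[0] & CR0_PE_MASK) && (env->cr[0] & CR0_PG_MASK)
 *
 * while real mode is simply !(env->cr[0] & CR0_PE_MASK).
 */
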
#define CR4_VME_MASK      (1U << 0)
#define CR4_PVI_MASK      (1U << 1)
#define CR4_TSD_MASK      (1U << 2)
#define CR4_DE_MASK       (1U << 3)
#define CR4_PSE_MASK      (1U << 4)
#define CR4_PAE_MASK      (1U << 5)
#define CR4_MCE_MASK      (1U << 6)
#define CR4_PGE_MASK      (1U << 7)
#define CR4_PCE_MASK      (1U << 8)
#define CR4_OSFXSR_SHIFT  9
#define CR4_OSFXSR_MASK   (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_LA57_MASK     (1U << 12)
#define CR4_VMXE_MASK     (1U << 13)
#define CR4_SMXE_MASK     (1U << 14)
#define CR4_FSGSBASE_MASK (1U << 16)
#define CR4_PCIDE_MASK    (1U << 17)
#define CR4_OSXSAVE_MASK  (1U << 18)
#define CR4_SMEP_MASK     (1U << 20)
#define CR4_SMAP_MASK     (1U << 21)
#define CR4_PKE_MASK      (1U << 22)

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_BT          (1 << 15)
#define DR6_FIXED_1     0xffff0ff0

#define DR7_GD          (1 << 13)
#define DR7_TYPE_SHIFT  16
#define DR7_LEN_SHIFT   18
#define DR7_FIXED_1     0x00000400
#define DR7_GLOBAL_BP_MASK   0xaa
#define DR7_LOCAL_BP_MASK    0x55
#define DR7_MAX_BP           4
#define DR7_TYPE_BP_INST     0x0
#define DR7_TYPE_DATA_WR     0x1
#define DR7_TYPE_IO_RW       0x2
#define DR7_TYPE_DATA_RW     0x3

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_PSE_PAT_BIT  12
#define PG_PKRU_BIT     59
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK  (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK  0x000ffffffffff000LL
#define PG_HI_RSVD_MASK  (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK  0x7ff0000000000000LL
#define PG_PKRU_MASK     (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK       (1ULL << PG_NX_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
#define PG_ERROR_PK_MASK   0x20
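
/*
 * Example (sketch, not upstream code): a 64-bit page-table entry is
 * decoded with the PG_* masks above, roughly:
 *
 *     bool present  = pte & PG_PRESENT_MASK;
 *     bool writable = pte & PG_RW_MASK;
 *     bool user     = pte & PG_USER_MASK;
 *     bool no_exec  = pte & PG_NX_MASK;
 *     uint64_t phys = pte & PG_ADDRESS_MASK;
 *
 * PG_HI_RSVD_MASK covers address bits above the implemented physical
 * width; they must be zero or the walk faults with PG_ERROR_RSVD_MASK set.
 */
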
#define MCG_CTL_P       (1ULL<<8)   /* MCG_CAP register available */
#define MCG_SER_P       (1ULL<<24)  /* MCA recovery/new status bits */
#define MCG_LMCE_P      (1ULL<<27)  /* Local Machine Check Supported */

#define MCE_CAP_DEF     (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF   10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL<<3)   /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */

#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
#define MCI_STATUS_S     (1ULL<<56)  /* Signaled machine check */
#define MCI_STATUS_AR    (1ULL<<55)  /* Action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF  0      /* segment offset */
#define MCM_ADDR_LINEAR  1      /* linear address */
#define MCM_ADDR_PHYS    2      /* physical address */
#define MCM_ADDR_MEM     3      /* memory address */
#define MCM_ADDR_GENERIC 7      /* generic */

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_EXTD          (1 << 10)
#define MSR_IA32_APICBASE_BASE          (0xfffffU<<12)
#define MSR_IA32_FEATURE_CONTROL        0x0000003a
#define MSR_TSC_ADJUST                  0x0000003b
#define MSR_IA32_SPEC_CTRL              0x48
#define MSR_VIRT_SSBD                   0xc001011f
#define MSR_IA32_PRED_CMD               0x49
#define MSR_IA32_CORE_CAPABILITY        0xcf
#define MSR_IA32_ARCH_CAPABILITIES      0x10a
#define MSR_IA32_TSCDEADLINE            0x6e0

#define FEATURE_CONTROL_LOCKED                    (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_LMCE                      (1<<20)

#define MSR_P6_PERFCTR0                 0xc1

#define MSR_IA32_SMBASE                 0x9e
#define MSR_SMI_COUNT                   0x34
#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)
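
/*
 * Example (sketch, not upstream code): MSR_MTRRcap advertises the MTRR
 * geometry.  A plausible capability word built from the values above is
 *
 *     mtrr_cap = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
 *                MSR_MTRRcap_WC_SUPPORTED;
 *
 * i.e. 0x508: 8 variable ranges plus fixed-range and write-combining
 * support.
 */
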
#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b
#define MSR_MCG_EXT_CTL                 0x4d0

#define MSR_P6_EVNTSEL0                 0x186

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_IA32_MISC_ENABLE            0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT    1
#define MSR_IA32_MISC_ENABLE_MWAIT      (1ULL << 18)

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr)         ((((addr) & ~1u) - 0x200) / 2)

#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0        0x309
#define MSR_CORE_PERF_FIXED_CTR1        0x30a
#define MSR_CORE_PERF_FIXED_CTR2        0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL    0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS     0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL       0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL   0x390

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_IA32_RTIT_OUTPUT_BASE       0x560
#define MSR_IA32_RTIT_OUTPUT_MASK       0x561
#define MSR_IA32_RTIT_CTL               0x570
#define MSR_IA32_RTIT_STATUS            0x571
#define MSR_IA32_RTIT_CR3_MATCH         0x572
#define MSR_IA32_RTIT_ADDR0_A           0x580
#define MSR_IA32_RTIT_ADDR0_B           0x581
#define MSR_IA32_RTIT_ADDR1_A           0x582
#define MSR_IA32_RTIT_ADDR1_B           0x583
#define MSR_IA32_RTIT_ADDR2_A           0x584
#define MSR_IA32_RTIT_ADDR2_B           0x585
#define MSR_IA32_RTIT_ADDR3_A           0x586
#define MSR_IA32_RTIT_ADDR3_B           0x587
#define MAX_RTIT_ADDRS                  8
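
/*
 * Worked example (not upstream code): variable-range MTRRs are base/mask
 * MSR pairs starting at 0x200, so pair 1 uses MSRs 0x202 and 0x203:
 *
 *     MSR_MTRRphysBase(1)      == 0x200 + 2 * 1     == 0x202
 *     MSR_MTRRphysMask(1)      == 0x200 + 2 * 1 + 1 == 0x203
 *     MSR_MTRRphysIndex(0x203) == ((0x203 & ~1u) - 0x200) / 2 == 1
 */
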
#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102
#define MSR_TSC_AUX                     0xc0000103

#define MSR_VM_HSAVE_PA                 0xc0010117

#define MSR_IA32_BNDCFGS                0x00000d90
#define MSR_IA32_XSS                    0x00000da0

#define XSTATE_FP_BIT                   0
#define XSTATE_SSE_BIT                  1
#define XSTATE_YMM_BIT                  2
#define XSTATE_BNDREGS_BIT              3
#define XSTATE_BNDCSR_BIT               4
#define XSTATE_OPMASK_BIT               5
#define XSTATE_ZMM_Hi256_BIT            6
#define XSTATE_Hi16_ZMM_BIT             7
#define XSTATE_PKRU_BIT                 9

#define XSTATE_FP_MASK                  (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK                 (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK                 (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK             (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK              (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK              (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK           (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK            (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK                (1ULL << XSTATE_PKRU_BIT)
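
/*
 * Example (sketch, not upstream code): XCR0 and the XSAVE header's
 * xstate_bv are bitmaps built from the XSTATE_*_MASK values above; a
 * guest with x87, SSE and AVX state enabled has
 *
 *     xcr0 = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK;
 *
 * which is the value 0x7.
 */
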
/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX,         /* CPUID[1].EDX */
    FEAT_1_ECX,         /* CPUID[1].ECX */
    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX,       /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_7_0_EDX,       /* CPUID[EAX=7,ECX=0].EDX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_KVM_HINTS,     /* CPUID[4000_0001].EDX */
    FEAT_HYPERV_EAX,    /* CPUID[4000_0003].EAX */
    FEAT_HYPERV_EBX,    /* CPUID[4000_0003].EBX */
    FEAT_HYPERV_EDX,    /* CPUID[4000_0003].EDX */
    FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */
    FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */
    FEAT_SVM,           /* CPUID[8000_000A].EDX */
    FEAT_XSAVE,         /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX,         /* CPUID[6].EAX */
    FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEAT_ARCH_CAPABILITIES,
    FEAT_CORE_CAPABILITY,
    FEATURE_WORDS,
} FeatureWord;

typedef uint32_t FeatureWordArray[FEATURE_WORDS];

/* cpuid_features bits */
#define CPUID_FP87  (1U << 0)
#define CPUID_VME   (1U << 1)
#define CPUID_DE    (1U << 2)
#define CPUID_PSE   (1U << 3)
#define CPUID_TSC   (1U << 4)
#define CPUID_MSR   (1U << 5)
#define CPUID_PAE   (1U << 6)
#define CPUID_MCE   (1U << 7)
#define CPUID_CX8   (1U << 8)
#define CPUID_APIC  (1U << 9)
#define CPUID_SEP   (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR  (1U << 12)
#define CPUID_PGE   (1U << 13)
#define CPUID_MCA   (1U << 14)
#define CPUID_CMOV  (1U << 15)
#define CPUID_PAT   (1U << 16)
#define CPUID_PSE36 (1U << 17)
#define CPUID_PN    (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS   (1U << 21)
#define CPUID_ACPI  (1U << 22)
#define CPUID_MMX   (1U << 23)
#define CPUID_FXSR  (1U << 24)
#define CPUID_SSE   (1U << 25)
#define CPUID_SSE2  (1U << 26)
#define CPUID_SS    (1U << 27)
#define CPUID_HT    (1U << 28)
#define CPUID_TM    (1U << 29)
#define CPUID_IA64  (1U << 30)
#define CPUID_PBE   (1U << 31)

#define CPUID_EXT_SSE3      (1U << 0)
#define CPUID_EXT_PCLMULQDQ (1U << 1)
#define CPUID_EXT_DTES64    (1U << 2)
#define CPUID_EXT_MONITOR   (1U << 3)
#define CPUID_EXT_DSCPL     (1U << 4)
#define CPUID_EXT_VMX       (1U << 5)
#define CPUID_EXT_SMX       (1U << 6)
#define CPUID_EXT_EST       (1U << 7)
#define CPUID_EXT_TM2       (1U << 8)
#define CPUID_EXT_SSSE3     (1U << 9)
#define CPUID_EXT_CID       (1U << 10)
#define CPUID_EXT_FMA       (1U << 12)
#define CPUID_EXT_CX16      (1U << 13)
#define CPUID_EXT_XTPR      (1U << 14)
#define CPUID_EXT_PDCM      (1U << 15)
#define CPUID_EXT_PCID      (1U << 17)
#define CPUID_EXT_DCA       (1U << 18)
#define CPUID_EXT_SSE41     (1U << 19)
#define CPUID_EXT_SSE42     (1U << 20)
#define CPUID_EXT_X2APIC    (1U << 21)
#define CPUID_EXT_MOVBE     (1U << 22)
#define CPUID_EXT_POPCNT    (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES       (1U << 25)
#define CPUID_EXT_XSAVE     (1U << 26)
#define CPUID_EXT_OSXSAVE   (1U << 27)
#define CPUID_EXT_AVX       (1U << 28)
#define CPUID_EXT_F16C      (1U << 29)
#define CPUID_EXT_RDRAND    (1U << 30)
#define CPUID_EXT_HYPERVISOR (1U << 31)

#define CPUID_EXT2_FPU      (1U << 0)
#define CPUID_EXT2_VME      (1U << 1)
#define CPUID_EXT2_DE       (1U << 2)
#define CPUID_EXT2_PSE      (1U << 3)
#define CPUID_EXT2_TSC      (1U << 4)
#define CPUID_EXT2_MSR      (1U << 5)
#define CPUID_EXT2_PAE      (1U << 6)
#define CPUID_EXT2_MCE      (1U << 7)
#define CPUID_EXT2_CX8      (1U << 8)
#define CPUID_EXT2_APIC     (1U << 9)
#define CPUID_EXT2_SYSCALL  (1U << 11)
#define CPUID_EXT2_MTRR     (1U << 12)
#define CPUID_EXT2_PGE      (1U << 13)
#define CPUID_EXT2_MCA      (1U << 14)
#define CPUID_EXT2_CMOV     (1U << 15)
#define CPUID_EXT2_PAT      (1U << 16)
#define CPUID_EXT2_PSE36    (1U << 17)
#define CPUID_EXT2_MP       (1U << 19)
#define CPUID_EXT2_NX       (1U << 20)
#define CPUID_EXT2_MMXEXT   (1U << 22)
#define CPUID_EXT2_MMX      (1U << 23)
#define CPUID_EXT2_FXSR     (1U << 24)
#define CPUID_EXT2_FFXSR    (1U << 25)
#define CPUID_EXT2_PDPE1GB  (1U << 26)
#define CPUID_EXT2_RDTSCP   (1U << 27)
#define CPUID_EXT2_LM       (1U << 29)
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW    (1U << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

#define CPUID_EXT3_LAHF_LM  (1U << 0)
#define CPUID_EXT3_CMP_LEG  (1U << 1)
#define CPUID_EXT3_SVM      (1U << 2)
#define CPUID_EXT3_EXTAPIC  (1U << 3)
#define CPUID_EXT3_CR8LEG   (1U << 4)
#define CPUID_EXT3_ABM      (1U << 5)
#define CPUID_EXT3_SSE4A    (1U << 6)
#define CPUID_EXT3_MISALIGNSSE (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW     (1U << 9)
#define CPUID_EXT3_IBS      (1U << 10)
#define CPUID_EXT3_XOP      (1U << 11)
#define CPUID_EXT3_SKINIT   (1U << 12)
#define CPUID_EXT3_WDT      (1U << 13)
#define CPUID_EXT3_LWP      (1U << 15)
#define CPUID_EXT3_FMA4     (1U << 16)
#define CPUID_EXT3_TCE      (1U << 17)
#define CPUID_EXT3_NODEID   (1U << 19)
#define CPUID_EXT3_TBM      (1U << 21)
#define CPUID_EXT3_TOPOEXT  (1U << 22)
#define CPUID_EXT3_PERFCORE (1U << 23)
#define CPUID_EXT3_PERFNB   (1U << 24)

#define CPUID_SVM_NPT          (1U << 0)
#define CPUID_SVM_LBRV         (1U << 1)
#define CPUID_SVM_SVMLOCK      (1U << 2)
#define CPUID_SVM_NRIPSAVE     (1U << 3)
#define CPUID_SVM_TSCSCALE     (1U << 4)
#define CPUID_SVM_VMCBCLEAN    (1U << 5)
#define CPUID_SVM_FLUSHASID    (1U << 6)
#define CPUID_SVM_DECODEASSIST (1U << 7)
#define CPUID_SVM_PAUSEFILTER  (1U << 10)
#define CPUID_SVM_PFTHRESHOLD  (1U << 12)

#define CPUID_7_0_EBX_FSGSBASE   (1U << 0)
#define CPUID_7_0_EBX_BMI1       (1U << 3)
#define CPUID_7_0_EBX_HLE        (1U << 4)
#define CPUID_7_0_EBX_AVX2       (1U << 5)
#define CPUID_7_0_EBX_SMEP       (1U << 7)
#define CPUID_7_0_EBX_BMI2       (1U << 8)
#define CPUID_7_0_EBX_ERMS       (1U << 9)
#define CPUID_7_0_EBX_INVPCID    (1U << 10)
#define CPUID_7_0_EBX_RTM        (1U << 11)
#define CPUID_7_0_EBX_MPX        (1U << 14)
#define CPUID_7_0_EBX_AVX512F    (1U << 16) /* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512DQ   (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
#define CPUID_7_0_EBX_RDSEED     (1U << 18)
#define CPUID_7_0_EBX_ADX        (1U << 19)
#define CPUID_7_0_EBX_SMAP       (1U << 20)
#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_PCOMMIT    (1U << 22) /* Persistent Commit */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLWB       (1U << 24) /* Cache Line Write Back */
#define CPUID_7_0_EBX_INTEL_PT   (1U << 25) /* Intel Processor Trace */
#define CPUID_7_0_EBX_AVX512PF   (1U << 26) /* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512ER   (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD   (1U << 28) /* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_SHA_NI     (1U << 29) /* SHA1/SHA256 Instruction Extensions */
#define CPUID_7_0_EBX_AVX512BW   (1U << 30) /* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512VL   (1U << 31) /* AVX-512 Vector Length Extensions */
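
/*
 * Example (sketch, not upstream code): each FeatureWord names one CPUID
 * register, and the per-CPU FeatureWordArray (the "features" field of
 * CPUX86State, further down in the full header) is indexed by it, so a
 * feature test looks like
 *
 *     if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP) {
 *         ...
 *     }
 */
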
#define CPUID_7_0_ECX_AVX512BMI (1U << 1)
#define CPUID_7_0_ECX_VBMI      (1U << 1)  /* AVX-512 Vector Byte Manipulation Instrs */
#define CPUID_7_0_ECX_UMIP      (1U << 2)
#define CPUID_7_0_ECX_PKU       (1U << 3)
#define CPUID_7_0_ECX_OSPKE     (1U << 4)
#define CPUID_7_0_ECX_VBMI2     (1U << 6)  /* Additional VBMI Instrs */
#define CPUID_7_0_ECX_GFNI      (1U << 8)
#define CPUID_7_0_ECX_VAES      (1U << 9)
#define CPUID_7_0_ECX_VPCLMULQDQ   (1U << 10)
#define CPUID_7_0_ECX_AVX512VNNI   (1U << 11)
#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_LA57      (1U << 16)
#define CPUID_7_0_ECX_RDPID     (1U << 22)
#define CPUID_7_0_ECX_CLDEMOTE  (1U << 25)  /* CLDEMOTE Instruction */
#define CPUID_7_0_ECX_MOVDIRI   (1U << 27)  /* MOVDIRI Instruction */
#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)  /* MOVDIR64B Instruction */

#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_SPEC_CTRL     (1U << 26) /* Speculation Control */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Arch Capabilities */
#define CPUID_7_0_EDX_CORE_CAPABILITY   (1U << 30) /* Core Capability */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD    (1U << 31) /* Speculative Store Bypass Disable */

#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)  /* Write back and do not invalidate cache */
#define CPUID_8000_0008_EBX_IBPB     (1U << 12) /* Indirect Branch Prediction Barrier */

#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC   (1U << 1)
#define CPUID_XSAVE_XGETBV1  (1U << 2)
#define CPUID_XSAVE_XSAVES   (1U << 3)

#define CPUID_6_EAX_ARAT     (1U << 2)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC     (1U << 8)

#define CPUID_VENDOR_SZ      12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
#define CPUID_VENDOR_INTEL   "GenuineIntel"

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD     "AuthenticAMD"

#define CPUID_VENDOR_VIA     "CentaurHauls"

#define CPUID_VENDOR_HYGON   "HygonGenuine"

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
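
/*
 * Worked example (not upstream code): CPUID vendor strings come back in
 * EBX:EDX:ECX as little-endian 32-bit words, which is where the _1/_2/_3
 * constants above come from.  For "GenuineIntel":
 *
 *     "Genu" == 'G' | 'e' << 8 | 'n' << 16 | 'u' << 24 == 0x756e6547
 *
 * and IS_INTEL_CPU()/IS_AMD_CPU() simply compare the three cached words.
 */
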
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */

/* CPUID[0xB].ECX level types */
#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
#define CPUID_TOPOLOGY_LEVEL_SMT     (1U << 8)
#define CPUID_TOPOLOGY_LEVEL_CORE    (2U << 8)
#define CPUID_TOPOLOGY_LEVEL_DIE     (5U << 8)

/* MSR Feature Bits */
#define MSR_ARCH_CAP_RDCL_NO            (1U << 0)
#define MSR_ARCH_CAP_IBRS_ALL           (1U << 1)
#define MSR_ARCH_CAP_RSBA               (1U << 2)
#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
#define MSR_ARCH_CAP_SSB_NO             (1U << 4)

#define MSR_CORE_CAP_SPLIT_LOCK_DETECT  (1U << 5)

/* Supported Hyper-V Enlightenments */
#define HYPERV_FEAT_RELAXED             0
#define HYPERV_FEAT_VAPIC               1
#define HYPERV_FEAT_TIME                2
#define HYPERV_FEAT_CRASH               3
#define HYPERV_FEAT_RESET               4
#define HYPERV_FEAT_VPINDEX             5
#define HYPERV_FEAT_RUNTIME             6
#define HYPERV_FEAT_SYNIC               7
#define HYPERV_FEAT_STIMER              8
#define HYPERV_FEAT_FREQUENCIES         9
#define HYPERV_FEAT_REENLIGHTENMENT     10
#define HYPERV_FEAT_TLBFLUSH            11
#define HYPERV_FEAT_EVMCS               12
#define HYPERV_FEAT_IPI                 13
#define HYPERV_FEAT_STIMER_DIRECT       14

#ifndef HYPERV_SPINLOCK_NEVER_RETRY
#define HYPERV_SPINLOCK_NEVER_RETRY     0xFFFFFFFF
#endif

#define EXCP00_DIVZ     0
#define EXCP01_DB       1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_SYSCALL    0x100 /* only happens in user only emulation
                                 for syscall instruction */
#define EXCP_VMEXIT     0x100

/* i386-specific interrupt pending bits.  */
#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_2

/* Use a clearer name for this.  */
#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_RESET

/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP).  When the
 * condition codes are needed, they are recomputed from this information.
 * Condition codes are not generated if they are only needed for
 * conditional branches.
 */
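
/*
 * Worked example (not upstream code): after a 32-bit ADD the translator
 * leaves CC_OP = CC_OP_ADDL, CC_DST = result and CC_SRC = first operand.
 * If a later instruction needs the flags, they are recovered on demand:
 *
 *     ZF = ((uint32_t)CC_DST == 0);
 *     CF = ((uint32_t)CC_DST < (uint32_t)CC_SRC);
 *
 * so nothing is paid on the common path where the flags die unused.
 */
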
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX,  /* CC_DST = C, CC_SRC = rest.  */
    CC_OP_ADOX,  /* CC_DST = O, CC_SRC = rest.  */
    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest.  */

    CC_OP_CLR,    /* Z set, all other flags clear.  */
    CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear.  */

    CC_OP_NB,
} CCOp;

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

#define MMREG_UNION(n, bits)        \
    union n {                       \
        uint8_t  _b_##n[(bits)/8];  \
        uint16_t _w_##n[(bits)/16]; \
        uint32_t _l_##n[(bits)/32]; \
        uint64_t _q_##n[(bits)/64]; \
        float32  _s_##n[(bits)/32]; \
        float64  _d_##n[(bits)/64]; \
    }

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
} XMMReg;

typedef union {
    uint8_t _b[32];
    uint16_t _w[16];
    uint32_t _l[8];
    uint64_t _q[4];
} YMMReg;

typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
typedef MMREG_UNION(MMXReg, 64)  MMXReg;

typedef struct BNDReg {
    uint64_t lb;
    uint64_t ub;
} BNDReg;

typedef struct BNDCSReg {
    uint64_t cfgu;
    uint64_t sts;
} BNDCSReg;

#define BNDCFG_ENABLE       1ULL
#define BNDCFG_BNDPRESERVE  2ULL
#define BNDCFG_BDIR_MASK    TARGET_PAGE_MASK

#ifdef HOST_WORDS_BIGENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]

#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define MMX_L(n) _l_MMXReg[1 - (n)]
#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]

#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
#define MMX_L(n) _l_MMXReg[n]
#define MMX_S(n) _s_MMXReg[n]
#endif
#define MMX_Q(n) _q_MMXReg[n]
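
/*
 * Example (sketch, not upstream code): the accessors above hide host
 * endianness, so element 0 of a vector register is read the same way on
 * every host:
 *
 *     uint32_t lo = env->xmm_regs[0].ZMM_L(0);
 *     uint64_t q0 = env->xmm_regs[0].ZMM_Q(0);
 *
 * xmm_regs[] is declared as an array of ZMMReg at the end of CPUX86State,
 * so XMM and YMM values occupy the low lanes of the 512-bit union.
 */
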
typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS    (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)

#define TARGET_INSN_START_EXTRA_WORDS 1

#define NB_OPMASK_REGS 8

/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
 * that APIC ID hasn't been set yet
 */
#define UNASSIGNED_APIC_ID 0xFFFFFFFF

typedef union X86LegacyXSaveArea {
    struct {
        uint16_t fcw;
        uint16_t fsw;
        uint8_t ftw;
        uint8_t reserved;
        uint16_t fpop;
        uint64_t fpip;
        uint64_t fpdp;
        uint32_t mxcsr;
        uint32_t mxcsr_mask;
        FPReg fpregs[8];
        uint8_t xmm_regs[16][16];
    };
    uint8_t data[512];
} X86LegacyXSaveArea;

typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;

/* Ext. save area 2: AVX State */
typedef struct XSaveAVX {
    uint8_t ymmh[16][16];
} XSaveAVX;

/* Ext. save area 3: BNDREG */
typedef struct XSaveBNDREG {
    BNDReg bnd_regs[4];
} XSaveBNDREG;

/* Ext. save area 4: BNDCSR */
typedef union XSaveBNDCSR {
    BNDCSReg bndcsr;
    uint8_t data[64];
} XSaveBNDCSR;

/* Ext. save area 5: Opmask */
typedef struct XSaveOpmask {
    uint64_t opmask_regs[NB_OPMASK_REGS];
} XSaveOpmask;

/* Ext. save area 6: ZMM_Hi256 */
typedef struct XSaveZMM_Hi256 {
    uint8_t zmm_hi256[16][32];
} XSaveZMM_Hi256;

/* Ext. save area 7: Hi16_ZMM */
typedef struct XSaveHi16_ZMM {
    uint8_t hi16_zmm[16][64];
} XSaveHi16_ZMM;

/* Ext. save area 9: PKRU state */
typedef struct XSavePKRU {
    uint32_t pkru;
    uint32_t padding;
} XSavePKRU;

typedef struct X86XSaveArea {
    X86LegacyXSaveArea legacy;
    X86XSaveHeader header;

    /* Extended save areas: */

    /* AVX State: */
    XSaveAVX avx_state;
    uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
    /* MPX State: */
    XSaveBNDREG bndreg_state;
    XSaveBNDCSR bndcsr_state;
    /* AVX-512 State: */
    XSaveOpmask opmask_state;
    XSaveZMM_Hi256 zmm_hi256_state;
    XSaveHi16_ZMM hi16_zmm_state;
    /* PKRU State: */
    XSavePKRU pkru_state;
} X86XSaveArea;

QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

/* Cache information data structures: */

enum CacheType {
    DATA_CACHE,
    INSTRUCTION_CACHE,
    UNIFIED_CACHE
};

typedef struct CPUCacheInfo {
    enum CacheType type;
    uint8_t level;
    /* Size in bytes */
Habkost uint32_t size; 11027e3482f8SEduardo Habkost /* Line size, in bytes */ 11037e3482f8SEduardo Habkost uint16_t line_size; 11047e3482f8SEduardo Habkost /* 11057e3482f8SEduardo Habkost * Associativity. 11067e3482f8SEduardo Habkost * Note: representation of fully-associative caches is not implemented 11077e3482f8SEduardo Habkost */ 11087e3482f8SEduardo Habkost uint8_t associativity; 11097e3482f8SEduardo Habkost /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ 11107e3482f8SEduardo Habkost uint8_t partitions; 11117e3482f8SEduardo Habkost /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ 11127e3482f8SEduardo Habkost uint32_t sets; 11137e3482f8SEduardo Habkost /* 11147e3482f8SEduardo Habkost * Lines per tag. 11157e3482f8SEduardo Habkost * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. 11167e3482f8SEduardo Habkost * (Is this synonym to @partitions?) 11177e3482f8SEduardo Habkost */ 11187e3482f8SEduardo Habkost uint8_t lines_per_tag; 11197e3482f8SEduardo Habkost 11207e3482f8SEduardo Habkost /* Self-initializing cache */ 11217e3482f8SEduardo Habkost bool self_init; 11227e3482f8SEduardo Habkost /* 11237e3482f8SEduardo Habkost * WBINVD/INVD is not guaranteed to act upon lower level caches of 11247e3482f8SEduardo Habkost * non-originating threads sharing this cache. 11257e3482f8SEduardo Habkost * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] 11267e3482f8SEduardo Habkost */ 11277e3482f8SEduardo Habkost bool no_invd_sharing; 11287e3482f8SEduardo Habkost /* 11297e3482f8SEduardo Habkost * Cache is inclusive of lower cache levels. 11307e3482f8SEduardo Habkost * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. 11317e3482f8SEduardo Habkost */ 11327e3482f8SEduardo Habkost bool inclusive; 11337e3482f8SEduardo Habkost /* 11347e3482f8SEduardo Habkost * A complex function is used to index the cache, potentially using all 11357e3482f8SEduardo Habkost * address bits. CPUID[4].EDX[bit 2]. 11367e3482f8SEduardo Habkost */ 11377e3482f8SEduardo Habkost bool complex_indexing; 11387e3482f8SEduardo Habkost } CPUCacheInfo; 11397e3482f8SEduardo Habkost 11407e3482f8SEduardo Habkost 11416aaeb054SBabu Moger typedef struct CPUCaches { 1142a9f27ea9SEduardo Habkost CPUCacheInfo *l1d_cache; 1143a9f27ea9SEduardo Habkost CPUCacheInfo *l1i_cache; 1144a9f27ea9SEduardo Habkost CPUCacheInfo *l2_cache; 1145a9f27ea9SEduardo Habkost CPUCacheInfo *l3_cache; 11466aaeb054SBabu Moger } CPUCaches; 11477e3482f8SEduardo Habkost 1148fcf5ef2aSThomas Huth typedef struct CPUX86State { 1149fcf5ef2aSThomas Huth /* standard registers */ 1150fcf5ef2aSThomas Huth target_ulong regs[CPU_NB_REGS]; 1151fcf5ef2aSThomas Huth target_ulong eip; 1152fcf5ef2aSThomas Huth target_ulong eflags; /* eflags register. During CPU emulation, CC 1153fcf5ef2aSThomas Huth flags and DF are set to zero because they are 1154fcf5ef2aSThomas Huth stored elsewhere */ 1155fcf5ef2aSThomas Huth 1156fcf5ef2aSThomas Huth /* emulator internal eflags handling */ 1157fcf5ef2aSThomas Huth target_ulong cc_dst; 1158fcf5ef2aSThomas Huth target_ulong cc_src; 1159fcf5ef2aSThomas Huth target_ulong cc_src2; 1160fcf5ef2aSThomas Huth uint32_t cc_op; 1161fcf5ef2aSThomas Huth int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ 1162fcf5ef2aSThomas Huth uint32_t hflags; /* TB flags, see HF_xxx constants. These flags 1163fcf5ef2aSThomas Huth are known at translation time. */ 1164fcf5ef2aSThomas Huth uint32_t hflags2; /* various other flags, see HF2_xxx constants. 
*/ 1165fcf5ef2aSThomas Huth 1166fcf5ef2aSThomas Huth /* segments */ 1167fcf5ef2aSThomas Huth SegmentCache segs[6]; /* selector values */ 1168fcf5ef2aSThomas Huth SegmentCache ldt; 1169fcf5ef2aSThomas Huth SegmentCache tr; 1170fcf5ef2aSThomas Huth SegmentCache gdt; /* only base and limit are used */ 1171fcf5ef2aSThomas Huth SegmentCache idt; /* only base and limit are used */ 1172fcf5ef2aSThomas Huth 1173fcf5ef2aSThomas Huth target_ulong cr[5]; /* NOTE: cr1 is unused */ 1174fcf5ef2aSThomas Huth int32_t a20_mask; 1175fcf5ef2aSThomas Huth 1176fcf5ef2aSThomas Huth BNDReg bnd_regs[4]; 1177fcf5ef2aSThomas Huth BNDCSReg bndcs_regs; 1178fcf5ef2aSThomas Huth uint64_t msr_bndcfgs; 1179fcf5ef2aSThomas Huth uint64_t efer; 1180fcf5ef2aSThomas Huth 1181fcf5ef2aSThomas Huth /* Beginning of state preserved by INIT (dummy marker). */ 1182fcf5ef2aSThomas Huth struct {} start_init_save; 1183fcf5ef2aSThomas Huth 1184fcf5ef2aSThomas Huth /* FPU state */ 1185fcf5ef2aSThomas Huth unsigned int fpstt; /* top of stack index */ 1186fcf5ef2aSThomas Huth uint16_t fpus; 1187fcf5ef2aSThomas Huth uint16_t fpuc; 1188fcf5ef2aSThomas Huth uint8_t fptags[8]; /* 0 = valid, 1 = empty */ 1189fcf5ef2aSThomas Huth FPReg fpregs[8]; 1190fcf5ef2aSThomas Huth /* KVM-only so far */ 1191fcf5ef2aSThomas Huth uint16_t fpop; 1192fcf5ef2aSThomas Huth uint64_t fpip; 1193fcf5ef2aSThomas Huth uint64_t fpdp; 1194fcf5ef2aSThomas Huth 1195fcf5ef2aSThomas Huth /* emulator internal variables */ 1196fcf5ef2aSThomas Huth float_status fp_status; 1197fcf5ef2aSThomas Huth floatx80 ft0; 1198fcf5ef2aSThomas Huth 1199fcf5ef2aSThomas Huth float_status mmx_status; /* for 3DNow! float ops */ 1200fcf5ef2aSThomas Huth float_status sse_status; 1201fcf5ef2aSThomas Huth uint32_t mxcsr; 1202fcf5ef2aSThomas Huth ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 
8 : 32]; 1203fcf5ef2aSThomas Huth ZMMReg xmm_t0; 1204fcf5ef2aSThomas Huth MMXReg mmx_t0; 1205fcf5ef2aSThomas Huth 1206c97d6d2cSSergio Andres Gomez Del Real XMMReg ymmh_regs[CPU_NB_REGS]; 1207c97d6d2cSSergio Andres Gomez Del Real 1208fcf5ef2aSThomas Huth uint64_t opmask_regs[NB_OPMASK_REGS]; 1209c97d6d2cSSergio Andres Gomez Del Real YMMReg zmmh_regs[CPU_NB_REGS]; 1210c97d6d2cSSergio Andres Gomez Del Real ZMMReg hi16_zmm_regs[CPU_NB_REGS]; 1211fcf5ef2aSThomas Huth 1212fcf5ef2aSThomas Huth /* sysenter registers */ 1213fcf5ef2aSThomas Huth uint32_t sysenter_cs; 1214fcf5ef2aSThomas Huth target_ulong sysenter_esp; 1215fcf5ef2aSThomas Huth target_ulong sysenter_eip; 1216fcf5ef2aSThomas Huth uint64_t star; 1217fcf5ef2aSThomas Huth 1218fcf5ef2aSThomas Huth uint64_t vm_hsave; 1219fcf5ef2aSThomas Huth 1220fcf5ef2aSThomas Huth #ifdef TARGET_X86_64 1221fcf5ef2aSThomas Huth target_ulong lstar; 1222fcf5ef2aSThomas Huth target_ulong cstar; 1223fcf5ef2aSThomas Huth target_ulong fmask; 1224fcf5ef2aSThomas Huth target_ulong kernelgsbase; 1225fcf5ef2aSThomas Huth #endif 1226fcf5ef2aSThomas Huth 1227fcf5ef2aSThomas Huth uint64_t tsc; 1228fcf5ef2aSThomas Huth uint64_t tsc_adjust; 1229fcf5ef2aSThomas Huth uint64_t tsc_deadline; 1230fcf5ef2aSThomas Huth uint64_t tsc_aux; 1231fcf5ef2aSThomas Huth 1232fcf5ef2aSThomas Huth uint64_t xcr0; 1233fcf5ef2aSThomas Huth 1234fcf5ef2aSThomas Huth uint64_t mcg_status; 1235fcf5ef2aSThomas Huth uint64_t msr_ia32_misc_enable; 1236fcf5ef2aSThomas Huth uint64_t msr_ia32_feature_control; 1237fcf5ef2aSThomas Huth 1238fcf5ef2aSThomas Huth uint64_t msr_fixed_ctr_ctrl; 1239fcf5ef2aSThomas Huth uint64_t msr_global_ctrl; 1240fcf5ef2aSThomas Huth uint64_t msr_global_status; 1241fcf5ef2aSThomas Huth uint64_t msr_global_ovf_ctrl; 1242fcf5ef2aSThomas Huth uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; 1243fcf5ef2aSThomas Huth uint64_t msr_gp_counters[MAX_GP_COUNTERS]; 1244fcf5ef2aSThomas Huth uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; 1245fcf5ef2aSThomas Huth 1246fcf5ef2aSThomas Huth uint64_t pat; 1247fcf5ef2aSThomas Huth uint32_t smbase; 1248e13713dbSLiran Alon uint64_t msr_smi_count; 1249fcf5ef2aSThomas Huth 1250fcf5ef2aSThomas Huth uint32_t pkru; 1251fcf5ef2aSThomas Huth 1252a33a2cfeSPaolo Bonzini uint64_t spec_ctrl; 1253cfeea0c0SKonrad Rzeszutek Wilk uint64_t virt_ssbd; 1254a33a2cfeSPaolo Bonzini 1255fcf5ef2aSThomas Huth /* End of state preserved by INIT (dummy marker). 
*/ 1256fcf5ef2aSThomas Huth struct {} end_init_save; 1257fcf5ef2aSThomas Huth 1258fcf5ef2aSThomas Huth uint64_t system_time_msr; 1259fcf5ef2aSThomas Huth uint64_t wall_clock_msr; 1260fcf5ef2aSThomas Huth uint64_t steal_time_msr; 1261fcf5ef2aSThomas Huth uint64_t async_pf_en_msr; 1262fcf5ef2aSThomas Huth uint64_t pv_eoi_en_msr; 1263fcf5ef2aSThomas Huth 1264da1cc323SEvgeny Yakovlev /* Partition-wide HV MSRs, will be updated only on the first vcpu */ 1265fcf5ef2aSThomas Huth uint64_t msr_hv_hypercall; 1266fcf5ef2aSThomas Huth uint64_t msr_hv_guest_os_id; 1267fcf5ef2aSThomas Huth uint64_t msr_hv_tsc; 1268da1cc323SEvgeny Yakovlev 1269da1cc323SEvgeny Yakovlev /* Per-VCPU HV MSRs */ 1270da1cc323SEvgeny Yakovlev uint64_t msr_hv_vapic; 12715e953812SRoman Kagan uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; 1272fcf5ef2aSThomas Huth uint64_t msr_hv_runtime; 1273fcf5ef2aSThomas Huth uint64_t msr_hv_synic_control; 1274fcf5ef2aSThomas Huth uint64_t msr_hv_synic_evt_page; 1275fcf5ef2aSThomas Huth uint64_t msr_hv_synic_msg_page; 12765e953812SRoman Kagan uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; 12775e953812SRoman Kagan uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; 12785e953812SRoman Kagan uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; 1279ba6a4fd9SVitaly Kuznetsov uint64_t msr_hv_reenlightenment_control; 1280ba6a4fd9SVitaly Kuznetsov uint64_t msr_hv_tsc_emulation_control; 1281ba6a4fd9SVitaly Kuznetsov uint64_t msr_hv_tsc_emulation_status; 1282fcf5ef2aSThomas Huth 1283b77146e9SChao Peng uint64_t msr_rtit_ctrl; 1284b77146e9SChao Peng uint64_t msr_rtit_status; 1285b77146e9SChao Peng uint64_t msr_rtit_output_base; 1286b77146e9SChao Peng uint64_t msr_rtit_output_mask; 1287b77146e9SChao Peng uint64_t msr_rtit_cr3_match; 1288b77146e9SChao Peng uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; 1289b77146e9SChao Peng 1290fcf5ef2aSThomas Huth /* exception/interrupt handling */ 1291fcf5ef2aSThomas Huth int error_code; 1292fcf5ef2aSThomas Huth int exception_is_int; 1293fcf5ef2aSThomas Huth target_ulong exception_next_eip; 1294fcf5ef2aSThomas Huth target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ 1295fcf5ef2aSThomas Huth union { 1296fcf5ef2aSThomas Huth struct CPUBreakpoint *cpu_breakpoint[4]; 1297fcf5ef2aSThomas Huth struct CPUWatchpoint *cpu_watchpoint[4]; 1298fcf5ef2aSThomas Huth }; /* break/watchpoints for dr[0..3] */ 1299fcf5ef2aSThomas Huth int old_exception; /* exception in flight */ 1300fcf5ef2aSThomas Huth 1301fcf5ef2aSThomas Huth uint64_t vm_vmcb; 1302fcf5ef2aSThomas Huth uint64_t tsc_offset; 1303fcf5ef2aSThomas Huth uint64_t intercept; 1304fcf5ef2aSThomas Huth uint16_t intercept_cr_read; 1305fcf5ef2aSThomas Huth uint16_t intercept_cr_write; 1306fcf5ef2aSThomas Huth uint16_t intercept_dr_read; 1307fcf5ef2aSThomas Huth uint16_t intercept_dr_write; 1308fcf5ef2aSThomas Huth uint32_t intercept_exceptions; 1309fe441054SJan Kiszka uint64_t nested_cr3; 1310fe441054SJan Kiszka uint32_t nested_pg_mode; 1311fcf5ef2aSThomas Huth uint8_t v_tpr; 1312fcf5ef2aSThomas Huth 1313fcf5ef2aSThomas Huth /* KVM states, automatically cleared on reset */ 1314fcf5ef2aSThomas Huth uint8_t nmi_injected; 1315fcf5ef2aSThomas Huth uint8_t nmi_pending; 1316fcf5ef2aSThomas Huth 1317fe441054SJan Kiszka uintptr_t retaddr; 1318fe441054SJan Kiszka 13191f5c00cfSAlex Bennée /* Fields up to this point are cleared by a CPU reset */ 13201f5c00cfSAlex Bennée struct {} end_reset_fields; 13211f5c00cfSAlex Bennée 1322e8b5fae5SRichard Henderson /* Fields after this point are preserved across CPU reset. 
*/ 1323fcf5ef2aSThomas Huth 1324fcf5ef2aSThomas Huth /* processor features (e.g. for CPUID insn) */ 1325fcf5ef2aSThomas Huth /* Minimum level/xlevel/xlevel2, based on CPU model + features */ 1326fcf5ef2aSThomas Huth uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; 1327fcf5ef2aSThomas Huth /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ 1328fcf5ef2aSThomas Huth uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; 1329fcf5ef2aSThomas Huth /* Actual level/xlevel/xlevel2 value: */ 1330fcf5ef2aSThomas Huth uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; 1331fcf5ef2aSThomas Huth uint32_t cpuid_vendor1; 1332fcf5ef2aSThomas Huth uint32_t cpuid_vendor2; 1333fcf5ef2aSThomas Huth uint32_t cpuid_vendor3; 1334fcf5ef2aSThomas Huth uint32_t cpuid_version; 1335fcf5ef2aSThomas Huth FeatureWordArray features; 1336d4a606b3SEduardo Habkost /* Features that were explicitly enabled/disabled */ 1337d4a606b3SEduardo Habkost FeatureWordArray user_features; 1338fcf5ef2aSThomas Huth uint32_t cpuid_model[12]; 1339a9f27ea9SEduardo Habkost /* Cache information for CPUID. When legacy-cache=on, the cache data 1340a9f27ea9SEduardo Habkost * on each CPUID leaf will be different, because we keep compatibility 1341a9f27ea9SEduardo Habkost * with old QEMU versions. 1342a9f27ea9SEduardo Habkost */ 1343a9f27ea9SEduardo Habkost CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; 1344fcf5ef2aSThomas Huth 1345fcf5ef2aSThomas Huth /* MTRRs */ 1346fcf5ef2aSThomas Huth uint64_t mtrr_fixed[11]; 1347fcf5ef2aSThomas Huth uint64_t mtrr_deftype; 1348fcf5ef2aSThomas Huth MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; 1349fcf5ef2aSThomas Huth 1350fcf5ef2aSThomas Huth /* For KVM */ 1351fcf5ef2aSThomas Huth uint32_t mp_state; 1352fd13f23bSLiran Alon int32_t exception_nr; 1353fcf5ef2aSThomas Huth int32_t interrupt_injected; 1354fcf5ef2aSThomas Huth uint8_t soft_interrupt; 1355fd13f23bSLiran Alon uint8_t exception_pending; 1356fd13f23bSLiran Alon uint8_t exception_injected; 1357fcf5ef2aSThomas Huth uint8_t has_error_code; 1358fd13f23bSLiran Alon uint8_t exception_has_payload; 1359fd13f23bSLiran Alon uint64_t exception_payload; 1360c97d6d2cSSergio Andres Gomez Del Real uint32_t ins_len; 1361fcf5ef2aSThomas Huth uint32_t sipi_vector; 1362fcf5ef2aSThomas Huth bool tsc_valid; 1363fcf5ef2aSThomas Huth int64_t tsc_khz; 1364fcf5ef2aSThomas Huth int64_t user_tsc_khz; /* for sanity check only */ 13655b8063c4SLiran Alon #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 13665b8063c4SLiran Alon void *xsave_buf; 13675b8063c4SLiran Alon #endif 1368ebbfef2fSLiran Alon #if defined(CONFIG_KVM) 1369ebbfef2fSLiran Alon struct kvm_nested_state *nested_state; 1370ebbfef2fSLiran Alon #endif 1371c97d6d2cSSergio Andres Gomez Del Real #if defined(CONFIG_HVF) 1372c97d6d2cSSergio Andres Gomez Del Real HVFX86EmulatorState *hvf_emul; 1373c97d6d2cSSergio Andres Gomez Del Real #endif 1374fcf5ef2aSThomas Huth 1375fcf5ef2aSThomas Huth uint64_t mcg_cap; 1376fcf5ef2aSThomas Huth uint64_t mcg_ctl; 1377fcf5ef2aSThomas Huth uint64_t mcg_ext_ctl; 1378fcf5ef2aSThomas Huth uint64_t mce_banks[MCE_BANKS_DEF*4]; 1379fcf5ef2aSThomas Huth uint64_t xstate_bv; 1380fcf5ef2aSThomas Huth 1381fcf5ef2aSThomas Huth /* vmstate */ 1382fcf5ef2aSThomas Huth uint16_t fpus_vmstate; 1383fcf5ef2aSThomas Huth uint16_t fptag_vmstate; 1384fcf5ef2aSThomas Huth uint16_t fpregs_format_vmstate; 1385fcf5ef2aSThomas Huth 1386fcf5ef2aSThomas Huth uint64_t xss; 1387fcf5ef2aSThomas Huth 1388fcf5ef2aSThomas Huth TPRAccess tpr_access_type; 1389c26ae610SLike Xu 1390c26ae610SLike Xu 
unsigned nr_dies;
1391fcf5ef2aSThomas Huth } CPUX86State;
1392fcf5ef2aSThomas Huth
1393fcf5ef2aSThomas Huth struct kvm_msrs;
1394fcf5ef2aSThomas Huth
1395fcf5ef2aSThomas Huth /**
1396fcf5ef2aSThomas Huth  * X86CPU:
1397fcf5ef2aSThomas Huth  * @env: #CPUX86State
1398fcf5ef2aSThomas Huth  * @migratable: If set, only migratable flags will be accepted when "enforce"
1399fcf5ef2aSThomas Huth  * mode is used, and only migratable flags will be included in the "host"
1400fcf5ef2aSThomas Huth  * CPU model.
1401fcf5ef2aSThomas Huth  *
1402fcf5ef2aSThomas Huth  * An x86 CPU.
1403fcf5ef2aSThomas Huth  */
1404fcf5ef2aSThomas Huth struct X86CPU {
1405fcf5ef2aSThomas Huth     /*< private >*/
1406fcf5ef2aSThomas Huth     CPUState parent_obj;
1407fcf5ef2aSThomas Huth     /*< public >*/
1408fcf5ef2aSThomas Huth
14095b146dc7SRichard Henderson     CPUNegativeOffsetState neg;
1410fcf5ef2aSThomas Huth     CPUX86State env;
1411fcf5ef2aSThomas Huth
14124f2beda4SEduardo Habkost     uint32_t hyperv_spinlock_attempts;
1413fcf5ef2aSThomas Huth     char *hyperv_vendor_id;
14149b4cf107SRoman Kagan     bool hyperv_synic_kvm_only;
14152d384d7cSVitaly Kuznetsov     uint64_t hyperv_features;
1416e48ddcc6SVitaly Kuznetsov     bool hyperv_passthrough;
14172d384d7cSVitaly Kuznetsov
1418fcf5ef2aSThomas Huth     bool check_cpuid;
1419fcf5ef2aSThomas Huth     bool enforce_cpuid;
1420dac1deaeSEduardo Habkost     /*
1421dac1deaeSEduardo Habkost      * Force features to be enabled even if the host doesn't support them.
1422dac1deaeSEduardo Habkost      * This is dangerous and should be done only for testing CPUID
1423dac1deaeSEduardo Habkost      * compatibility.
1424dac1deaeSEduardo Habkost      */
1425dac1deaeSEduardo Habkost     bool force_features;
1426fcf5ef2aSThomas Huth     bool expose_kvm;
14271ce36bfeSDaniel P. Berrange     bool expose_tcg;
1428fcf5ef2aSThomas Huth     bool migratable;
1429990e0be2SPaolo Bonzini     bool migrate_smi_count;
143044bd8e53SEduardo Habkost     bool max_features; /* Enable all supported features automatically */
1431fcf5ef2aSThomas Huth     uint32_t apic_id;
1432fcf5ef2aSThomas Huth
14339954a158SPhil Dennis-Jordan     /* Enables publishing of TSC increment and Local APIC bus frequencies to
14349954a158SPhil Dennis-Jordan      * the guest OS in CPUID page 0x40000010, the same way that VMware does. */
14359954a158SPhil Dennis-Jordan     bool vmware_cpuid_freq;
14369954a158SPhil Dennis-Jordan
1437fcf5ef2aSThomas Huth     /* if true the CPUID code directly forwards host cache leaves to the guest */
1438fcf5ef2aSThomas Huth     bool cache_info_passthrough;
1439fcf5ef2aSThomas Huth
14402266d443SMichael S. Tsirkin     /* if true the CPUID code directly forwards
14412266d443SMichael S. Tsirkin      * host monitor/mwait leaves to the guest */
14422266d443SMichael S. Tsirkin     struct {
14432266d443SMichael S. Tsirkin         uint32_t eax;
14442266d443SMichael S. Tsirkin         uint32_t ebx;
14452266d443SMichael S. Tsirkin         uint32_t ecx;
14462266d443SMichael S. Tsirkin         uint32_t edx;
14472266d443SMichael S. Tsirkin     } mwait;
14482266d443SMichael S. Tsirkin
1449fcf5ef2aSThomas Huth     /* Features that were filtered out because of missing host capabilities */
1450f69ecddbSWei Yang     FeatureWordArray filtered_features;
1451fcf5ef2aSThomas Huth
1452fcf5ef2aSThomas Huth     /* Enable PMU CPUID bits. This can't be enabled by default yet because
1453fcf5ef2aSThomas Huth      * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
1454fcf5ef2aSThomas Huth      * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
1455fcf5ef2aSThomas Huth      * capabilities) directly to the guest.
1456fcf5ef2aSThomas Huth      */
1457fcf5ef2aSThomas Huth     bool enable_pmu;
1458fcf5ef2aSThomas Huth
1459fcf5ef2aSThomas Huth     /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
1460fcf5ef2aSThomas Huth      * disabled by default to avoid breaking migration between QEMU with
1461fcf5ef2aSThomas Huth      * different LMCE configurations.
1462fcf5ef2aSThomas Huth      */
1463fcf5ef2aSThomas Huth     bool enable_lmce;
1464fcf5ef2aSThomas Huth
1465fcf5ef2aSThomas Huth     /* Compatibility bits for old machine types.
1466fcf5ef2aSThomas Huth      * If true, present a virtual l3 cache for the VM: the vcpus in the same
1467fcf5ef2aSThomas Huth      * virtual socket share a virtual l3 cache.
1468fcf5ef2aSThomas Huth      */
1469fcf5ef2aSThomas Huth     bool enable_l3_cache;
1470fcf5ef2aSThomas Huth
1471ab8f992eSBabu Moger     /* Compatibility bits for old machine types.
1472ab8f992eSBabu Moger      * If true, present the old cache topology information.
1473ab8f992eSBabu Moger      */
1474ab8f992eSBabu Moger     bool legacy_cache;
1475ab8f992eSBabu Moger
1476fcf5ef2aSThomas Huth     /* Compatibility bits for old machine types: */
1477fcf5ef2aSThomas Huth     bool enable_cpuid_0xb;
1478fcf5ef2aSThomas Huth
1479fcf5ef2aSThomas Huth     /* Enable auto level-increase for all CPUID leaves */
1480fcf5ef2aSThomas Huth     bool full_cpuid_auto_level;
1481fcf5ef2aSThomas Huth
1482f24c3a79SLuwei Kang     /* Enable auto level-increase for the Intel Processor Trace leaf */
1483f24c3a79SLuwei Kang     bool intel_pt_auto_level;
1484f24c3a79SLuwei Kang
1485fcf5ef2aSThomas Huth     /* if true fill the top bits of the MTRR_PHYSMASKn variable range */
1486fcf5ef2aSThomas Huth     bool fill_mtrr_mask;
1487fcf5ef2aSThomas Huth
1488fcf5ef2aSThomas Huth     /* if true override the phys_bits value with a value read from the host */
1489fcf5ef2aSThomas Huth     bool host_phys_bits;
1490fcf5ef2aSThomas Huth
1491258fe08bSEduardo Habkost     /* if set, limit maximum value for phys_bits when host_phys_bits is true */
1492258fe08bSEduardo Habkost     uint8_t host_phys_bits_limit;
1493258fe08bSEduardo Habkost
1494fc3a1fd7SDr. David Alan Gilbert     /* Stop SMI delivery for migration compatibility with old machines */
1495fc3a1fd7SDr. David Alan Gilbert     bool kvm_no_smi_migration;
1496fc3a1fd7SDr. David Alan Gilbert
1497fcf5ef2aSThomas Huth     /* Number of physical address bits supported */
1498fcf5ef2aSThomas Huth     uint32_t phys_bits;
1499fcf5ef2aSThomas Huth
1500fcf5ef2aSThomas Huth     /* in order to simplify APIC support, we leave this pointer to the
1501fcf5ef2aSThomas Huth        user */
1502fcf5ef2aSThomas Huth     struct DeviceState *apic_state;
1503fcf5ef2aSThomas Huth     struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
1504fcf5ef2aSThomas Huth     Notifier machine_done;
1505fcf5ef2aSThomas Huth
1506fcf5ef2aSThomas Huth     struct kvm_msrs *kvm_msr_buf;
1507fcf5ef2aSThomas Huth
150815f8b142SIgor Mammedov     int32_t node_id; /* NUMA node this CPU belongs to */
1509fcf5ef2aSThomas Huth     int32_t socket_id;
1510176d2cdaSLike Xu     int32_t die_id;
1511fcf5ef2aSThomas Huth     int32_t core_id;
1512fcf5ef2aSThomas Huth     int32_t thread_id;
15136c69dfb6SGonglei
15146c69dfb6SGonglei     int32_t hv_max_vps;
1515fcf5ef2aSThomas Huth };
1516fcf5ef2aSThomas Huth
1517fcf5ef2aSThomas Huth
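/*
 * Illustrative sketch added for this annotated listing (not part of the
 * upstream header): roughly how ->features and ->filtered_features are
 * expected to relate when a requested feature is unavailable.  The
 * "host_supported" parameter is a stand-in for whatever the accelerator
 * reports (e.g. KVM's GET_SUPPORTED_CPUID); the real filtering logic lives
 * in target/i386/cpu.c.
 */
#if 0 /* example only */
static void example_filter_features(X86CPU *cpu,
                                    const FeatureWordArray host_supported)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        /* Bits the user/model asked for but the host cannot provide */
        cpu->filtered_features[w] = cpu->env.features[w] & ~host_supported[w];
        /* "-cpu ...,check" warns about these; "enforce" refuses to start */
        cpu->env.features[w] &= host_supported[w];
    }
}
#endif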
1518fcf5ef2aSThomas Huth #ifndef CONFIG_USER_ONLY
1519*8a9358ccSMarkus Armbruster extern VMStateDescription vmstate_x86_cpu;
1520fcf5ef2aSThomas Huth #endif
1521fcf5ef2aSThomas Huth
1522fcf5ef2aSThomas Huth /**
1523fcf5ef2aSThomas Huth  * x86_cpu_do_interrupt:
1524fcf5ef2aSThomas Huth  * @cpu: vCPU the interrupt is to be handled by.
1525fcf5ef2aSThomas Huth */ 1526fcf5ef2aSThomas Huth void x86_cpu_do_interrupt(CPUState *cpu); 1527fcf5ef2aSThomas Huth bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); 152892d5f1a4SPaolo Bonzini int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); 1529fcf5ef2aSThomas Huth 1530fcf5ef2aSThomas Huth int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, 1531fcf5ef2aSThomas Huth int cpuid, void *opaque); 1532fcf5ef2aSThomas Huth int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, 1533fcf5ef2aSThomas Huth int cpuid, void *opaque); 1534fcf5ef2aSThomas Huth int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 1535fcf5ef2aSThomas Huth void *opaque); 1536fcf5ef2aSThomas Huth int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 1537fcf5ef2aSThomas Huth void *opaque); 1538fcf5ef2aSThomas Huth 1539fcf5ef2aSThomas Huth void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, 1540fcf5ef2aSThomas Huth Error **errp); 1541fcf5ef2aSThomas Huth 154290c84c56SMarkus Armbruster void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); 1543fcf5ef2aSThomas Huth 1544fcf5ef2aSThomas Huth hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); 1545fcf5ef2aSThomas Huth 1546fcf5ef2aSThomas Huth int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); 1547fcf5ef2aSThomas Huth int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 1548fcf5ef2aSThomas Huth 1549fcf5ef2aSThomas Huth void x86_cpu_exec_enter(CPUState *cpu); 1550fcf5ef2aSThomas Huth void x86_cpu_exec_exit(CPUState *cpu); 1551fcf5ef2aSThomas Huth 15520442428aSMarkus Armbruster void x86_cpu_list(void); 1553fcf5ef2aSThomas Huth int cpu_x86_support_mca_broadcast(CPUX86State *env); 1554fcf5ef2aSThomas Huth 1555fcf5ef2aSThomas Huth int cpu_get_pic_interrupt(CPUX86State *s); 1556fcf5ef2aSThomas Huth /* MSDOS compatibility mode FPU exception support */ 1557fcf5ef2aSThomas Huth void cpu_set_ferr(CPUX86State *s); 15585e76d84eSPaolo Bonzini /* mpx_helper.c */ 15595e76d84eSPaolo Bonzini void cpu_sync_bndcs_hflags(CPUX86State *env); 1560fcf5ef2aSThomas Huth 1561fcf5ef2aSThomas Huth /* this function must always be used to load data in the segment 1562fcf5ef2aSThomas Huth cache: it synchronizes the hflags with the segment cache values */ 1563fcf5ef2aSThomas Huth static inline void cpu_x86_load_seg_cache(CPUX86State *env, 1564fcf5ef2aSThomas Huth int seg_reg, unsigned int selector, 1565fcf5ef2aSThomas Huth target_ulong base, 1566fcf5ef2aSThomas Huth unsigned int limit, 1567fcf5ef2aSThomas Huth unsigned int flags) 1568fcf5ef2aSThomas Huth { 1569fcf5ef2aSThomas Huth SegmentCache *sc; 1570fcf5ef2aSThomas Huth unsigned int new_hflags; 1571fcf5ef2aSThomas Huth 1572fcf5ef2aSThomas Huth sc = &env->segs[seg_reg]; 1573fcf5ef2aSThomas Huth sc->selector = selector; 1574fcf5ef2aSThomas Huth sc->base = base; 1575fcf5ef2aSThomas Huth sc->limit = limit; 1576fcf5ef2aSThomas Huth sc->flags = flags; 1577fcf5ef2aSThomas Huth 1578fcf5ef2aSThomas Huth /* update the hidden flags */ 1579fcf5ef2aSThomas Huth { 1580fcf5ef2aSThomas Huth if (seg_reg == R_CS) { 1581fcf5ef2aSThomas Huth #ifdef TARGET_X86_64 1582fcf5ef2aSThomas Huth if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { 1583fcf5ef2aSThomas Huth /* long mode */ 1584fcf5ef2aSThomas Huth env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 1585fcf5ef2aSThomas Huth env->hflags &= ~(HF_ADDSEG_MASK); 1586fcf5ef2aSThomas Huth } else 1587fcf5ef2aSThomas Huth #endif 1588fcf5ef2aSThomas Huth { 
1589fcf5ef2aSThomas Huth             /* legacy / compatibility case */
1590fcf5ef2aSThomas Huth             new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
1591fcf5ef2aSThomas Huth                 >> (DESC_B_SHIFT - HF_CS32_SHIFT);
1592fcf5ef2aSThomas Huth             env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
1593fcf5ef2aSThomas Huth                 new_hflags;
1594fcf5ef2aSThomas Huth         }
1595fcf5ef2aSThomas Huth     }
1596fcf5ef2aSThomas Huth     if (seg_reg == R_SS) {
1597fcf5ef2aSThomas Huth         int cpl = (flags >> DESC_DPL_SHIFT) & 3;
1598fcf5ef2aSThomas Huth #if HF_CPL_MASK != 3
1599fcf5ef2aSThomas Huth #error HF_CPL_MASK is hardcoded
1600fcf5ef2aSThomas Huth #endif
1601fcf5ef2aSThomas Huth         env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
16025e76d84eSPaolo Bonzini         /* Possibly switch between BNDCFGS and BNDCFGU */
16035e76d84eSPaolo Bonzini         cpu_sync_bndcs_hflags(env);
1604fcf5ef2aSThomas Huth     }
1605fcf5ef2aSThomas Huth     new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
1606fcf5ef2aSThomas Huth         >> (DESC_B_SHIFT - HF_SS32_SHIFT);
1607fcf5ef2aSThomas Huth     if (env->hflags & HF_CS64_MASK) {
1608fcf5ef2aSThomas Huth         /* zero base assumed for DS, ES and SS in long mode */
1609fcf5ef2aSThomas Huth     } else if (!(env->cr[0] & CR0_PE_MASK) ||
1610fcf5ef2aSThomas Huth                (env->eflags & VM_MASK) ||
1611fcf5ef2aSThomas Huth                !(env->hflags & HF_CS32_MASK)) {
1612fcf5ef2aSThomas Huth         /* XXX: try to avoid this test. The problem comes from the
1613fcf5ef2aSThomas Huth            fact that in real mode or vm86 mode we only modify the
1614fcf5ef2aSThomas Huth            'base' and 'selector' fields of the segment cache to go
1615fcf5ef2aSThomas Huth            faster. A solution may be to force addseg to one in
1616fcf5ef2aSThomas Huth            translate-i386.c. */
1617fcf5ef2aSThomas Huth         new_hflags |= HF_ADDSEG_MASK;
1618fcf5ef2aSThomas Huth     } else {
1619fcf5ef2aSThomas Huth         new_hflags |= ((env->segs[R_DS].base |
1620fcf5ef2aSThomas Huth                         env->segs[R_ES].base |
1621fcf5ef2aSThomas Huth                         env->segs[R_SS].base) != 0) <<
1622fcf5ef2aSThomas Huth             HF_ADDSEG_SHIFT;
1623fcf5ef2aSThomas Huth     }
1624fcf5ef2aSThomas Huth     env->hflags = (env->hflags &
1625fcf5ef2aSThomas Huth                    ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
1626fcf5ef2aSThomas Huth     }
1627fcf5ef2aSThomas Huth }
1628fcf5ef2aSThomas Huth
1629fcf5ef2aSThomas Huth static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
1630fcf5ef2aSThomas Huth                                                uint8_t sipi_vector)
1631fcf5ef2aSThomas Huth {
1632fcf5ef2aSThomas Huth     CPUState *cs = CPU(cpu);
1633fcf5ef2aSThomas Huth     CPUX86State *env = &cpu->env;
1634fcf5ef2aSThomas Huth
1635fcf5ef2aSThomas Huth     env->eip = 0;
1636fcf5ef2aSThomas Huth     cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
1637fcf5ef2aSThomas Huth                            sipi_vector << 12,
1638fcf5ef2aSThomas Huth                            env->segs[R_CS].limit,
1639fcf5ef2aSThomas Huth                            env->segs[R_CS].flags);
1640fcf5ef2aSThomas Huth     cs->halted = 0;
1641fcf5ef2aSThomas Huth }
1642fcf5ef2aSThomas Huth
1643fcf5ef2aSThomas Huth int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1644fcf5ef2aSThomas Huth                             target_ulong *base, unsigned int *limit,
1645fcf5ef2aSThomas Huth                             unsigned int *flags);
1646fcf5ef2aSThomas Huth
1647fcf5ef2aSThomas Huth /* op_helper.c */
1648fcf5ef2aSThomas Huth /* used for debug or cpu save/restore */
1649fcf5ef2aSThomas Huth
1650fcf5ef2aSThomas Huth /* cpu-exec.c */
1651fcf5ef2aSThomas Huth /* the following helpers are only usable in user mode simulation as
1652fcf5ef2aSThomas Huth    they can trigger unexpected exceptions */
1653fcf5ef2aSThomas Huth void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
1654fcf5ef2aSThomas Huth void
cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); 1655fcf5ef2aSThomas Huth void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); 16561c1df019SPranith Kumar void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); 16571c1df019SPranith Kumar void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); 1658fcf5ef2aSThomas Huth 1659fcf5ef2aSThomas Huth /* you can call this signal handler from your SIGBUS and SIGSEGV 1660fcf5ef2aSThomas Huth signal handlers to inform the virtual CPU of exceptions. non zero 1661fcf5ef2aSThomas Huth is returned if the signal was handled by the virtual CPU. */ 1662fcf5ef2aSThomas Huth int cpu_x86_signal_handler(int host_signum, void *pinfo, 1663fcf5ef2aSThomas Huth void *puc); 1664fcf5ef2aSThomas Huth 1665fcf5ef2aSThomas Huth /* cpu.c */ 1666fcf5ef2aSThomas Huth void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 1667fcf5ef2aSThomas Huth uint32_t *eax, uint32_t *ebx, 1668fcf5ef2aSThomas Huth uint32_t *ecx, uint32_t *edx); 1669fcf5ef2aSThomas Huth void cpu_clear_apic_feature(CPUX86State *env); 1670fcf5ef2aSThomas Huth void host_cpuid(uint32_t function, uint32_t count, 1671fcf5ef2aSThomas Huth uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); 167220271d48SEduardo Habkost void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); 1673fcf5ef2aSThomas Huth 1674fcf5ef2aSThomas Huth /* helper.c */ 16755d004421SRichard Henderson bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 16765d004421SRichard Henderson MMUAccessType access_type, int mmu_idx, 16775d004421SRichard Henderson bool probe, uintptr_t retaddr); 1678fcf5ef2aSThomas Huth void x86_cpu_set_a20(X86CPU *cpu, int a20_state); 1679fcf5ef2aSThomas Huth 1680fcf5ef2aSThomas Huth #ifndef CONFIG_USER_ONLY 1681f8c45c65SPaolo Bonzini static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) 1682f8c45c65SPaolo Bonzini { 1683f8c45c65SPaolo Bonzini return !!attrs.secure; 1684f8c45c65SPaolo Bonzini } 1685f8c45c65SPaolo Bonzini 1686f8c45c65SPaolo Bonzini static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) 1687f8c45c65SPaolo Bonzini { 1688f8c45c65SPaolo Bonzini return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); 1689f8c45c65SPaolo Bonzini } 1690f8c45c65SPaolo Bonzini 1691fcf5ef2aSThomas Huth uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); 1692fcf5ef2aSThomas Huth uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); 1693fcf5ef2aSThomas Huth uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); 1694fcf5ef2aSThomas Huth uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); 1695fcf5ef2aSThomas Huth void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); 1696fcf5ef2aSThomas Huth void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); 1697fcf5ef2aSThomas Huth void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); 1698fcf5ef2aSThomas Huth void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); 1699fcf5ef2aSThomas Huth void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); 1700fcf5ef2aSThomas Huth #endif 1701fcf5ef2aSThomas Huth 1702fcf5ef2aSThomas Huth void breakpoint_handler(CPUState *cs); 1703fcf5ef2aSThomas Huth 1704fcf5ef2aSThomas Huth /* will be suppressed */ 1705fcf5ef2aSThomas Huth void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 1706fcf5ef2aSThomas Huth void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 1707fcf5ef2aSThomas Huth void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 1708fcf5ef2aSThomas Huth void cpu_x86_update_dr7(CPUX86State *env, 
uint32_t new_dr7); 1709fcf5ef2aSThomas Huth 1710fcf5ef2aSThomas Huth /* hw/pc.c */ 1711fcf5ef2aSThomas Huth uint64_t cpu_get_tsc(CPUX86State *env); 1712fcf5ef2aSThomas Huth 1713fcf5ef2aSThomas Huth /* XXX: This value should match the one returned by CPUID 1714fcf5ef2aSThomas Huth * and in exec.c */ 1715fcf5ef2aSThomas Huth # if defined(TARGET_X86_64) 1716fcf5ef2aSThomas Huth # define TCG_PHYS_ADDR_BITS 40 1717fcf5ef2aSThomas Huth # else 1718fcf5ef2aSThomas Huth # define TCG_PHYS_ADDR_BITS 36 1719fcf5ef2aSThomas Huth # endif 1720fcf5ef2aSThomas Huth 1721fcf5ef2aSThomas Huth #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS) 1722fcf5ef2aSThomas Huth 1723311ca98dSIgor Mammedov #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU 1724311ca98dSIgor Mammedov #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) 17250dacec87SIgor Mammedov #define CPU_RESOLVING_TYPE TYPE_X86_CPU 1726311ca98dSIgor Mammedov 1727311ca98dSIgor Mammedov #ifdef TARGET_X86_64 1728311ca98dSIgor Mammedov #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") 1729311ca98dSIgor Mammedov #else 1730311ca98dSIgor Mammedov #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") 1731311ca98dSIgor Mammedov #endif 1732311ca98dSIgor Mammedov 1733fcf5ef2aSThomas Huth #define cpu_signal_handler cpu_x86_signal_handler 1734fcf5ef2aSThomas Huth #define cpu_list x86_cpu_list 1735fcf5ef2aSThomas Huth 1736fcf5ef2aSThomas Huth /* MMU modes definitions */ 1737fcf5ef2aSThomas Huth #define MMU_MODE0_SUFFIX _ksmap 1738fcf5ef2aSThomas Huth #define MMU_MODE1_SUFFIX _user 1739fcf5ef2aSThomas Huth #define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ 1740fcf5ef2aSThomas Huth #define MMU_KSMAP_IDX 0 1741fcf5ef2aSThomas Huth #define MMU_USER_IDX 1 1742fcf5ef2aSThomas Huth #define MMU_KNOSMAP_IDX 2 1743fcf5ef2aSThomas Huth static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) 1744fcf5ef2aSThomas Huth { 1745fcf5ef2aSThomas Huth return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : 1746fcf5ef2aSThomas Huth (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) 1747fcf5ef2aSThomas Huth ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; 1748fcf5ef2aSThomas Huth } 1749fcf5ef2aSThomas Huth 1750fcf5ef2aSThomas Huth static inline int cpu_mmu_index_kernel(CPUX86State *env) 1751fcf5ef2aSThomas Huth { 1752fcf5ef2aSThomas Huth return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : 1753fcf5ef2aSThomas Huth ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) 1754fcf5ef2aSThomas Huth ? 
MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; 1755fcf5ef2aSThomas Huth } 1756fcf5ef2aSThomas Huth 1757fcf5ef2aSThomas Huth #define CC_DST (env->cc_dst) 1758fcf5ef2aSThomas Huth #define CC_SRC (env->cc_src) 1759fcf5ef2aSThomas Huth #define CC_SRC2 (env->cc_src2) 1760fcf5ef2aSThomas Huth #define CC_OP (env->cc_op) 1761fcf5ef2aSThomas Huth 1762fcf5ef2aSThomas Huth /* n must be a constant to be efficient */ 1763fcf5ef2aSThomas Huth static inline target_long lshift(target_long x, int n) 1764fcf5ef2aSThomas Huth { 1765fcf5ef2aSThomas Huth if (n >= 0) { 1766fcf5ef2aSThomas Huth return x << n; 1767fcf5ef2aSThomas Huth } else { 1768fcf5ef2aSThomas Huth return x >> (-n); 1769fcf5ef2aSThomas Huth } 1770fcf5ef2aSThomas Huth } 1771fcf5ef2aSThomas Huth 1772fcf5ef2aSThomas Huth /* float macros */ 1773fcf5ef2aSThomas Huth #define FT0 (env->ft0) 1774fcf5ef2aSThomas Huth #define ST0 (env->fpregs[env->fpstt].d) 1775fcf5ef2aSThomas Huth #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) 1776fcf5ef2aSThomas Huth #define ST1 ST(1) 1777fcf5ef2aSThomas Huth 1778fcf5ef2aSThomas Huth /* translate.c */ 1779fcf5ef2aSThomas Huth void tcg_x86_init(void); 1780fcf5ef2aSThomas Huth 17814f7c64b3SRichard Henderson typedef CPUX86State CPUArchState; 17822161a612SRichard Henderson typedef X86CPU ArchCPU; 17834f7c64b3SRichard Henderson 1784fcf5ef2aSThomas Huth #include "exec/cpu-all.h" 1785fcf5ef2aSThomas Huth #include "svm.h" 1786fcf5ef2aSThomas Huth 1787fcf5ef2aSThomas Huth #if !defined(CONFIG_USER_ONLY) 1788fcf5ef2aSThomas Huth #include "hw/i386/apic.h" 1789fcf5ef2aSThomas Huth #endif 1790fcf5ef2aSThomas Huth 1791fcf5ef2aSThomas Huth static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, 1792fcf5ef2aSThomas Huth target_ulong *cs_base, uint32_t *flags) 1793fcf5ef2aSThomas Huth { 1794fcf5ef2aSThomas Huth *cs_base = env->segs[R_CS].base; 1795fcf5ef2aSThomas Huth *pc = *cs_base + env->eip; 1796fcf5ef2aSThomas Huth *flags = env->hflags | 1797fcf5ef2aSThomas Huth (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); 1798fcf5ef2aSThomas Huth } 1799fcf5ef2aSThomas Huth 1800fcf5ef2aSThomas Huth void do_cpu_init(X86CPU *cpu); 1801fcf5ef2aSThomas Huth void do_cpu_sipi(X86CPU *cpu); 1802fcf5ef2aSThomas Huth 1803fcf5ef2aSThomas Huth #define MCE_INJECT_BROADCAST 1 1804fcf5ef2aSThomas Huth #define MCE_INJECT_UNCOND_AO 2 1805fcf5ef2aSThomas Huth 1806fcf5ef2aSThomas Huth void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, 1807fcf5ef2aSThomas Huth uint64_t status, uint64_t mcg_status, uint64_t addr, 1808fcf5ef2aSThomas Huth uint64_t misc, int flags); 1809fcf5ef2aSThomas Huth 1810fcf5ef2aSThomas Huth /* excp_helper.c */ 1811fcf5ef2aSThomas Huth void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); 1812fcf5ef2aSThomas Huth void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, 1813fcf5ef2aSThomas Huth uintptr_t retaddr); 1814fcf5ef2aSThomas Huth void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, 1815fcf5ef2aSThomas Huth int error_code); 1816fcf5ef2aSThomas Huth void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, 1817fcf5ef2aSThomas Huth int error_code, uintptr_t retaddr); 1818fcf5ef2aSThomas Huth void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, 1819fcf5ef2aSThomas Huth int error_code, int next_eip_addend); 1820fcf5ef2aSThomas Huth 1821fcf5ef2aSThomas Huth /* cc_helper.c */ 1822fcf5ef2aSThomas Huth extern const uint8_t parity_table[256]; 1823fcf5ef2aSThomas Huth uint32_t 
cpu_cc_compute_all(CPUX86State *env1, int op);
1824fcf5ef2aSThomas Huth
1825fcf5ef2aSThomas Huth static inline uint32_t cpu_compute_eflags(CPUX86State *env)
1826fcf5ef2aSThomas Huth {
182779c664f6SYang Zhong     uint32_t eflags = env->eflags;
182879c664f6SYang Zhong     if (tcg_enabled()) {
182979c664f6SYang Zhong         eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
183079c664f6SYang Zhong     }
183179c664f6SYang Zhong     return eflags;
1832fcf5ef2aSThomas Huth }
1833fcf5ef2aSThomas Huth
1834fcf5ef2aSThomas Huth /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
1835fcf5ef2aSThomas Huth  * after generating a call to a helper that uses this.
1836fcf5ef2aSThomas Huth  */
1837fcf5ef2aSThomas Huth static inline void cpu_load_eflags(CPUX86State *env, int eflags,
1838fcf5ef2aSThomas Huth                                    int update_mask)
1839fcf5ef2aSThomas Huth {
1840fcf5ef2aSThomas Huth     CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1841fcf5ef2aSThomas Huth     CC_OP = CC_OP_EFLAGS;
1842fcf5ef2aSThomas Huth     env->df = 1 - (2 * ((eflags >> 10) & 1));
1843fcf5ef2aSThomas Huth     env->eflags = (env->eflags & ~update_mask) |
1844fcf5ef2aSThomas Huth         (eflags & update_mask) | 0x2;
1845fcf5ef2aSThomas Huth }
1846fcf5ef2aSThomas Huth
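/*
 * Worked example added for this annotated listing (not part of the upstream
 * header): how the lazy-flags fields round-trip an EFLAGS value.  Suppose a
 * caller does cpu_load_eflags(env, 0x446, ~0), i.e. loads PF|ZF|DF plus the
 * always-set bit 1:
 *
 *   CC_SRC  = 0x446 & (CC_O|CC_S|CC_Z|CC_A|CC_P|CC_C) = 0x044    (ZF|PF)
 *   CC_OP   = CC_OP_EFLAGS      (arithmetic flags now live in CC_SRC)
 *   env->df = 1 - 2 * ((0x446 >> 10) & 1) = -1                   (DF set)
 *
 * cpu_compute_eflags() folds the pieces back together under TCG:
 *
 *   env->eflags | CC_SRC | (env->df & DF_MASK)
 *     = 0x446 | 0x044 | (-1 & 0x400) = 0x446
 *
 * so (env->df & DF_MASK) is exactly the step that turns the +1/-1 direction
 * value back into EFLAGS bit 10.
 */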
1847fcf5ef2aSThomas Huth /* load efer and update the corresponding hflags. XXX: do consistency
1848fcf5ef2aSThomas Huth    checks with cpuid bits? */
1849fcf5ef2aSThomas Huth static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
1850fcf5ef2aSThomas Huth {
1851fcf5ef2aSThomas Huth     env->efer = val;
1852fcf5ef2aSThomas Huth     env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
1853fcf5ef2aSThomas Huth     if (env->efer & MSR_EFER_LMA) {
1854fcf5ef2aSThomas Huth         env->hflags |= HF_LMA_MASK;
1855fcf5ef2aSThomas Huth     }
1856fcf5ef2aSThomas Huth     if (env->efer & MSR_EFER_SVME) {
1857fcf5ef2aSThomas Huth         env->hflags |= HF_SVME_MASK;
1858fcf5ef2aSThomas Huth     }
1859fcf5ef2aSThomas Huth }
1860fcf5ef2aSThomas Huth
1861fcf5ef2aSThomas Huth static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
1862fcf5ef2aSThomas Huth {
1863fcf5ef2aSThomas Huth     return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
1864fcf5ef2aSThomas Huth }
1865fcf5ef2aSThomas Huth
1866c8bc83a4SPaolo Bonzini static inline int32_t x86_get_a20_mask(CPUX86State *env)
1867c8bc83a4SPaolo Bonzini {
1868c8bc83a4SPaolo Bonzini     if (env->hflags & HF_SMM_MASK) {
1869c8bc83a4SPaolo Bonzini         return -1;
1870c8bc83a4SPaolo Bonzini     } else {
1871c8bc83a4SPaolo Bonzini         return env->a20_mask;
1872c8bc83a4SPaolo Bonzini     }
1873c8bc83a4SPaolo Bonzini }
1874c8bc83a4SPaolo Bonzini
187518ab37baSLiran Alon static inline bool cpu_has_vmx(CPUX86State *env)
187618ab37baSLiran Alon {
187718ab37baSLiran Alon     return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
187818ab37baSLiran Alon }
187918ab37baSLiran Alon
188079a197abSLiran Alon /*
188179a197abSLiran Alon  * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
188279a197abSLiran Alon  * Since it was set, CR4.VMXE must remain set as long as the vCPU is in
188379a197abSLiran Alon  * VMX operation. This is because CR4.VMXE is one of the bits set
188479a197abSLiran Alon  * in MSR_IA32_VMX_CR4_FIXED1.
188579a197abSLiran Alon  *
188679a197abSLiran Alon  * There is one exception to the above statement, when the vCPU enters SMM mode.
188779a197abSLiran Alon  * When a vCPU enters SMM mode, it temporarily exits VMX operation and
188879a197abSLiran Alon  * may also reset CR4.VMXE during execution in SMM mode.
188979a197abSLiran Alon  * When the vCPU exits SMM mode, its state is restored to be in VMX operation
189079a197abSLiran Alon  * and CR4.VMXE is restored to its original value of being set.
189179a197abSLiran Alon  *
189279a197abSLiran Alon  * Therefore, when the vCPU is not in SMM mode, we can infer whether
189379a197abSLiran Alon  * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
189479a197abSLiran Alon  * know for certain.
189579a197abSLiran Alon  */
189679a197abSLiran Alon static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
189779a197abSLiran Alon {
189879a197abSLiran Alon     return cpu_has_vmx(env) &&
189979a197abSLiran Alon         ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
190079a197abSLiran Alon }
190179a197abSLiran Alon
1902fcf5ef2aSThomas Huth /* fpu_helper.c */
19031d8ad165SYang Zhong void update_fp_status(CPUX86State *env);
19041d8ad165SYang Zhong void update_mxcsr_status(CPUX86State *env);
19051d8ad165SYang Zhong
19061d8ad165SYang Zhong static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
19071d8ad165SYang Zhong {
19081d8ad165SYang Zhong     env->mxcsr = mxcsr;
19091d8ad165SYang Zhong     if (tcg_enabled()) {
19101d8ad165SYang Zhong         update_mxcsr_status(env);
19111d8ad165SYang Zhong     }
19121d8ad165SYang Zhong }
19131d8ad165SYang Zhong
19141d8ad165SYang Zhong static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
19151d8ad165SYang Zhong {
19161d8ad165SYang Zhong     env->fpuc = fpuc;
19171d8ad165SYang Zhong     if (tcg_enabled()) {
19181d8ad165SYang Zhong         update_fp_status(env);
19191d8ad165SYang Zhong     }
19201d8ad165SYang Zhong }
1921fcf5ef2aSThomas Huth
1922fcf5ef2aSThomas Huth /* mem_helper.c */
1923fcf5ef2aSThomas Huth void helper_lock_init(void);
1924fcf5ef2aSThomas Huth
1925fcf5ef2aSThomas Huth /* svm_helper.c */
1926fcf5ef2aSThomas Huth void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
192765c9d60aSPaolo Bonzini                                    uint64_t param, uintptr_t retaddr);
192850b3de6eSJan Kiszka void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
192950b3de6eSJan Kiszka                               uint64_t exit_info_1, uintptr_t retaddr);
193010cde894SPaolo Bonzini void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
1931fcf5ef2aSThomas Huth
1932fcf5ef2aSThomas Huth /* seg_helper.c */
1933fcf5ef2aSThomas Huth void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
1934fcf5ef2aSThomas Huth
1935fcf5ef2aSThomas Huth /* smm_helper.c */
1936fcf5ef2aSThomas Huth void do_smm_enter(X86CPU *cpu);
1937fcf5ef2aSThomas Huth
1938fcf5ef2aSThomas Huth /* apic.c */
1939fcf5ef2aSThomas Huth void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
1940fcf5ef2aSThomas Huth void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
1941fcf5ef2aSThomas Huth                                    TPRAccess access);
1942fcf5ef2aSThomas Huth
1943fcf5ef2aSThomas Huth
1944fcf5ef2aSThomas Huth /* Change the value of a KVM-specific default
1945fcf5ef2aSThomas Huth  *
1946fcf5ef2aSThomas Huth  * If value is NULL, no default will be set and the original
1947fcf5ef2aSThomas Huth  * value from the CPU model table will be kept.
1948fcf5ef2aSThomas Huth  *
1949fcf5ef2aSThomas Huth  * It is valid to call this function only for properties that
1950fcf5ef2aSThomas Huth  * are already present in the kvm_default_props table.
1951fcf5ef2aSThomas Huth  */
1952fcf5ef2aSThomas Huth void x86_cpu_change_kvm_default(const char *prop, const char *value);
1953fcf5ef2aSThomas Huth
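/*
 * Hypothetical usage sketch added for this annotated listing (not part of
 * the upstream header).  A machine-type compat hook could use the helper
 * above to stop overriding a property when KVM is in use; "x2apic" is
 * assumed here to be one of the entries in the kvm_default_props table in
 * target/i386/cpu.c.
 */
#if 0 /* example only */
static void example_compat_hook(void)
{
    /* Keep whatever the CPU model table says instead of the KVM default */
    x86_cpu_change_kvm_default("x2apic", NULL);
}
#endif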
1954dcafd1efSEduardo Habkost /* Special values for X86CPUVersion: */
1955dcafd1efSEduardo Habkost
1956dcafd1efSEduardo Habkost /* Resolve to latest CPU version */
1957dcafd1efSEduardo Habkost #define CPU_VERSION_LATEST -1
1958dcafd1efSEduardo Habkost
19590788a56bSEduardo Habkost /*
19600788a56bSEduardo Habkost  * Resolve to version defined by current machine type.
19610788a56bSEduardo Habkost  * See x86_cpu_set_default_version()
19620788a56bSEduardo Habkost  */
19630788a56bSEduardo Habkost #define CPU_VERSION_AUTO -2
19640788a56bSEduardo Habkost
1965dcafd1efSEduardo Habkost /* Don't resolve to any versioned CPU models, like old QEMU versions */
1966dcafd1efSEduardo Habkost #define CPU_VERSION_LEGACY 0
1967dcafd1efSEduardo Habkost
1968dcafd1efSEduardo Habkost typedef int X86CPUVersion;
1969dcafd1efSEduardo Habkost
19700788a56bSEduardo Habkost /*
19710788a56bSEduardo Habkost  * Set default CPU model version for CPU models having
19720788a56bSEduardo Habkost  * version == CPU_VERSION_AUTO.
19730788a56bSEduardo Habkost  */
19740788a56bSEduardo Habkost void x86_cpu_set_default_version(X86CPUVersion version);
19750788a56bSEduardo Habkost
1976fcf5ef2aSThomas Huth /* Return name of 32-bit register, from an R_* constant */
1977fcf5ef2aSThomas Huth const char *get_register_name_32(unsigned int reg);
1978fcf5ef2aSThomas Huth
1979fcf5ef2aSThomas Huth void enable_compat_apic_id_mode(void);
1980fcf5ef2aSThomas Huth
1981fcf5ef2aSThomas Huth #define APIC_DEFAULT_ADDRESS 0xfee00000
1982fcf5ef2aSThomas Huth #define APIC_SPACE_SIZE 0x100000
1983fcf5ef2aSThomas Huth
1984d3fd9e4bSMarkus Armbruster void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
1985fcf5ef2aSThomas Huth
1986fcf5ef2aSThomas Huth /* cpu.c */
1987fcf5ef2aSThomas Huth bool cpu_is_bsp(X86CPU *cpu);
1988fcf5ef2aSThomas Huth
198986a57621SSergio Andres Gomez Del Real void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
199086a57621SSergio Andres Gomez Del Real void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
199135b1b927STao Wu void x86_update_hflags(CPUX86State* env);
199235b1b927STao Wu
19932d384d7cSVitaly Kuznetsov static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
19942d384d7cSVitaly Kuznetsov {
19952d384d7cSVitaly Kuznetsov     return !!(cpu->hyperv_features & BIT(feat));
19962d384d7cSVitaly Kuznetsov }
19972d384d7cSVitaly Kuznetsov
1998fcf5ef2aSThomas Huth #endif /* I386_CPU_H */
1999