/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H

#include "sysemu/tcg.h"
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
#include "exec/cpu-defs.h"
#include "exec/memop.h"
#include "hw/i386/topology.h"
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
#include "qemu/timer.h"

#define XEN_NR_VIRQS 24

#define KVM_HAVE_MCE_INJECTION 1

/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
#define I386_ELF_MACHINE EM_386
#define ELF_MACHINE_UNAME "i686"
#endif

enum {
    R_EAX = 0,
    R_ECX = 1,
    R_EDX = 2,
    R_EBX = 3,
    R_ESP = 4,
    R_EBP = 5,
    R_ESI = 6,
    R_EDI = 7,
    R_R8 = 8,
    R_R9 = 9,
    R_R10 = 10,
    R_R11 = 11,
    R_R12 = 12,
    R_R13 = 13,
    R_R14 = 14,
    R_R15 = 15,

    R_AL = 0,
    R_CL = 1,
    R_DL = 2,
    R_BL = 3,
    R_AH = 4,
    R_CH = 5,
    R_DH = 6,
    R_BH = 7,
};

typedef enum X86Seg {
    R_ES = 0,
    R_CS = 1,
    R_SS = 2,
    R_DS = 3,
    R_FS = 4,
    R_GS = 5,
    R_LDTR = 6,
    R_TR = 7,
} X86Seg;

/* segment descriptor fields */
#define DESC_G_SHIFT 23
#define DESC_G_MASK (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
#define DESC_AVL_SHIFT 20
#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
#define DESC_P_SHIFT 15
#define DESC_P_MASK (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT 13
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
#define DESC_S_SHIFT 12
#define DESC_S_MASK (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8)

#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK (1 << 10) /* code: conforming */
#define DESC_R_MASK (1 << 9) /* code: readable */

#define DESC_E_MASK (1 << 10) /* data: expansion direction */
#define DESC_W_MASK (1 << 9) /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
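/*
 * Illustrative example (editor's note, not part of the original header):
 * a flat 32-bit ring-0 code segment, as set up by typical protected-mode
 * boot code, would carry roughly these flags in the high descriptor word:
 *
 *   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
 *   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK
 *
 * i.e. 4 KiB granularity, 32-bit default operand size, present,
 * non-system, readable and accessed code segment with DPL 0.
 */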

/* eflags masks */
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800

#define TF_SHIFT 8
#define IOPL_SHIFT 12
#define VM_SHIFT 17

#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
#define DF_MASK 0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK 0x00004000
#define RF_MASK 0x00010000
#define VM_MASK 0x00020000
#define AC_MASK 0x00040000
#define VIF_MASK 0x00080000
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and SVME ones are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT 6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT 7
#define HF_TF_SHIFT 8 /* must be same as eflags */
#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_GUEST_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
#define HF_UMIP_SHIFT 27 /* CR4.UMIP */
#define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */

#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK (1 << HF_PE_SHIFT)
#define HF_TF_MASK (1 << HF_TF_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_GUEST_MASK (1 << HF_GUEST_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
#define HF_UMIP_MASK (1 << HF_UMIP_SHIFT)
#define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT)
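/*
 * Illustrative example (editor's note): with env being a CPUX86State *,
 * the current privilege level lives in the low bits of env->hflags and
 * is typically extracted as
 *
 *   int cpl = (env->hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
 *
 * while single-bit state is tested directly, e.g.
 * (env->hflags & HF_LMA_MASK) for long mode.
 */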

/* hflags2 */

#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
#define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */
#define HF2_VGIF_SHIFT 8 /* Can take VIRQ */

#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
#define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT)
#define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK (1U << 0)
#define CR0_MP_MASK (1U << 1)
#define CR0_EM_MASK (1U << 2)
#define CR0_TS_MASK (1U << 3)
#define CR0_ET_MASK (1U << 4)
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
#define CR0_NW_MASK (1U << 29)
#define CR0_CD_MASK (1U << 30)
#define CR0_PG_MASK (1U << 31)

#define CR4_VME_MASK (1U << 0)
#define CR4_PVI_MASK (1U << 1)
#define CR4_TSD_MASK (1U << 2)
#define CR4_DE_MASK (1U << 3)
#define CR4_PSE_MASK (1U << 4)
#define CR4_PAE_MASK (1U << 5)
#define CR4_MCE_MASK (1U << 6)
#define CR4_PGE_MASK (1U << 7)
#define CR4_PCE_MASK (1U << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_UMIP_MASK (1U << 11)
#define CR4_LA57_MASK (1U << 12)
#define CR4_VMXE_MASK (1U << 13)
#define CR4_SMXE_MASK (1U << 14)
#define CR4_FSGSBASE_MASK (1U << 16)
#define CR4_PCIDE_MASK (1U << 17)
#define CR4_OSXSAVE_MASK (1U << 18)
#define CR4_SMEP_MASK (1U << 20)
#define CR4_SMAP_MASK (1U << 21)
#define CR4_PKE_MASK (1U << 22)
#define CR4_PKS_MASK (1U << 24)
#define CR4_LAM_SUP_MASK (1U << 28)

#ifdef TARGET_X86_64
#define CR4_FRED_MASK (1ULL << 32)
#else
#define CR4_FRED_MASK 0
#endif

#define CR4_RESERVED_MASK \
    (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
                     | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
                     | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
                     | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
                     | CR4_LA57_MASK \
                     | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
                     | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK \
                     | CR4_LAM_SUP_MASK | CR4_FRED_MASK))
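/*
 * Illustrative sketch (editor's note): a CR4 write with any
 * architecturally reserved bit set must fault, so a check along these
 * lines is expected in the CR4 update path:
 *
 *   if (new_cr4 & CR4_RESERVED_MASK) {
 *       // raise #GP(0)  (pseudo-code)
 *   }
 *
 * Feature-dependent bits (e.g. CR4_PKE_MASK without PKU in CPUID) have
 * to be rejected separately based on the guest's feature bits.
 */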

#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_FIXED_1 0xffff0ff0

#define DR7_GD (1 << 13)
#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT 18
#define DR7_FIXED_1 0x00000400
#define DR7_GLOBAL_BP_MASK 0xaa
#define DR7_LOCAL_BP_MASK 0x55
#define DR7_MAX_BP 4
#define DR7_TYPE_BP_INST 0x0
#define DR7_TYPE_DATA_WR 0x1
#define DR7_TYPE_IO_RW 0x2
#define DR7_TYPE_DATA_RW 0x3

#define DR_RESERVED_MASK 0xffffffff00000000ULL
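/*
 * Illustrative example (editor's note): the type and length fields for
 * hardware breakpoint n (0 <= n < DR7_MAX_BP) occupy two bits each, so
 * they can be extracted as
 *
 *   type = (dr7 >> (DR7_TYPE_SHIFT + n * 4)) & 3;   DR7_TYPE_* value
 *   len  = (dr7 >> (DR7_LEN_SHIFT + n * 4)) & 3;    0/1/3 -> 1/2/4 bytes,
 *                                                   2 -> 8 bytes (64-bit only)
 */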

#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define PG_USER_BIT 2
#define PG_PWT_BIT 3
#define PG_PCD_BIT 4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT 6
#define PG_PSE_BIT 7
#define PG_GLOBAL_BIT 8
#define PG_PSE_PAT_BIT 12
#define PG_PKRU_BIT 59
#define PG_NX_BIT 63

#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
#define PG_RW_MASK (1 << PG_RW_BIT)
#define PG_USER_MASK (1 << PG_USER_BIT)
#define PG_PWT_MASK (1 << PG_PWT_BIT)
#define PG_PCD_MASK (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK 0x000ffffffffff000LL
#define PG_HI_USER_MASK 0x7ff0000000000000LL
#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK (1ULL << PG_NX_BIT)

#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK 0x01
#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK 0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
#define PG_ERROR_PK_MASK 0x20
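/*
 * Illustrative example (editor's note): a page-fault error code is built
 * by ORing these bits; a user-mode write that hit a protection violation
 * on a present page would be reported as
 *
 *   PG_ERROR_P_MASK | PG_ERROR_W_MASK | PG_ERROR_U_MASK   (= 0x07)
 */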

#define PG_MODE_PAE (1 << 0)
#define PG_MODE_LMA (1 << 1)
#define PG_MODE_NXE (1 << 2)
#define PG_MODE_PSE (1 << 3)
#define PG_MODE_LA57 (1 << 4)
#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15)

/* Bits of CR4 that do not affect the NPT page format. */
#define PG_MODE_WP (1 << 16)
#define PG_MODE_PKE (1 << 17)
#define PG_MODE_PKS (1 << 18)
#define PG_MODE_SMEP (1 << 19)
#define PG_MODE_PG (1 << 20)

#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */

#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF 10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */

#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
#define MCI_STATUS_DEFERRED (1ULL<<44) /* Deferred error */
#define MCI_STATUS_POISON (1ULL<<43) /* Poisoned data consumed */

/* MISC register defines */
#define MCM_ADDR_SEGOFF 0 /* segment offset */
#define MCM_ADDR_LINEAR 1 /* linear address */
#define MCM_ADDR_PHYS 2 /* physical address */
#define MCM_ADDR_MEM 3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */

#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_EXTD (1 << 10)
#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
#define MSR_IA32_APICBASE_RESERVED \
    (~(uint64_t)(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE \
                 | MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_BASE))
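/*
 * Illustrative example (editor's note): the physical base address of the
 * APIC MMIO page is the masked MSR value, e.g.
 *
 *   uint64_t apic_base = msr_ia32_apicbase & MSR_IA32_APICBASE_BASE;
 *
 * while MSR_IA32_APICBASE_ENABLE and MSR_IA32_APICBASE_EXTD select
 * xAPIC/x2APIC operation.
 */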

#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
#define MSR_IA32_PRED_CMD 0x49
#define MSR_IA32_UCODE_REV 0x8b
#define MSR_IA32_CORE_CAPABILITY 0xcf

#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define ARCH_CAP_TSX_CTRL_MSR (1<<7)

#define MSR_IA32_PERF_CAPABILITIES 0x345
#define PERF_CAP_LBR_FMT 0x3f

#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
#define MSR_RAPL_POWER_UNIT 0x00000606
#define MSR_PKG_POWER_LIMIT 0x00000610
#define MSR_PKG_ENERGY_STATUS 0x00000611
#define MSR_PKG_POWER_INFO 0x00000614
#define MSR_ARCH_LBR_CTL 0x000014ce
#define MSR_ARCH_LBR_DEPTH 0x000014cf
#define MSR_ARCH_LBR_FROM_0 0x00001500
#define MSR_ARCH_LBR_TO_0 0x00001600
#define MSR_ARCH_LBR_INFO_0 0x00001200

#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_SGX_LC (1ULL << 17)
#define FEATURE_CONTROL_SGX (1ULL << 18)
#define FEATURE_CONTROL_LMCE (1<<20)

#define MSR_IA32_SGXLEPUBKEYHASH0 0x8c
#define MSR_IA32_SGXLEPUBKEYHASH1 0x8d
#define MSR_IA32_SGXLEPUBKEYHASH2 0x8e
#define MSR_IA32_SGXLEPUBKEYHASH3 0x8f

#define MSR_P6_PERFCTR0 0xc1

#define MSR_IA32_SMBASE 0x9e
#define MSR_SMI_COUNT 0x34
#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)

#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_MCG_EXT_CTL 0x4d0

#define MSR_P6_EVNTSEL0 0x186

#define MSR_IA32_PERF_STATUS 0x198

#define MSR_IA32_MISC_ENABLE 0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT 1
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)

#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
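/*
 * Illustrative example (editor's note): variable-range MTRRs come in
 * base/mask pairs, so e.g. MSR_MTRRphysBase(3) is 0x206,
 * MSR_MTRRphysMask(3) is 0x207, and MSR_MTRRphysIndex(0x207) recovers
 * the range index 3 from either MSR number of the pair.
 */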

#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f

#define MSR_PAT 0x277

#define MSR_MTRRdefType 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0 0x309
#define MSR_CORE_PERF_FIXED_CTR1 0x30a
#define MSR_CORE_PERF_FIXED_CTR2 0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390

#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403

#define MSR_IA32_RTIT_OUTPUT_BASE 0x560
#define MSR_IA32_RTIT_OUTPUT_MASK 0x561
#define MSR_IA32_RTIT_CTL 0x570
#define MSR_IA32_RTIT_STATUS 0x571
#define MSR_IA32_RTIT_CR3_MATCH 0x572
#define MSR_IA32_RTIT_ADDR0_A 0x580
#define MSR_IA32_RTIT_ADDR0_B 0x581
#define MSR_IA32_RTIT_ADDR1_A 0x582
#define MSR_IA32_RTIT_ADDR1_B 0x583
#define MSR_IA32_RTIT_ADDR2_A 0x584
#define MSR_IA32_RTIT_ADDR2_B 0x585
#define MSR_IA32_RTIT_ADDR3_A 0x586
#define MSR_IA32_RTIT_ADDR3_B 0x587
#define MAX_RTIT_ADDRS 8

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE (1 << 0)
#define MSR_EFER_LME (1 << 8)
#define MSR_EFER_LMA (1 << 10)
#define MSR_EFER_NXE (1 << 11)
#define MSR_EFER_SVME (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_EFER_RESERVED \
    (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME \
                     | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME \
                     | MSR_EFER_FFXSR))

#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_FMASK 0xc0000084
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
#define MSR_TSC_AUX 0xc0000103
#define MSR_AMD64_TSC_RATIO 0xc0000104

#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL

#define MSR_K7_HWCR 0xc0010015

#define MSR_VM_HSAVE_PA 0xc0010117

#define MSR_IA32_XFD 0x000001c4
#define MSR_IA32_XFD_ERR 0x000001c5

/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x000001cc /* Stack level 0 regular stack pointer */
#define MSR_IA32_FRED_RSP1 0x000001cd /* Stack level 1 regular stack pointer */
#define MSR_IA32_FRED_RSP2 0x000001ce /* Stack level 2 regular stack pointer */
#define MSR_IA32_FRED_RSP3 0x000001cf /* Stack level 3 regular stack pointer */
#define MSR_IA32_FRED_STKLVLS 0x000001d0 /* FRED exception stack levels */
#define MSR_IA32_FRED_SSP1 0x000001d1 /* Stack level 1 shadow stack pointer in ring 0 */
#define MSR_IA32_FRED_SSP2 0x000001d2 /* Stack level 2 shadow stack pointer in ring 0 */
#define MSR_IA32_FRED_SSP3 0x000001d3 /* Stack level 3 shadow stack pointer in ring 0 */
#define MSR_IA32_FRED_CONFIG 0x000001d4 /* FRED Entrypoint and interrupt stack level */

#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0
#define MSR_IA32_UMWAIT_CONTROL 0xe1

#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
#define MSR_IA32_VMX_MISC 0x00000485
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
#define MSR_IA32_VMX_VMFUNC 0x00000491

#define MSR_APIC_START 0x00000800
#define MSR_APIC_END 0x000008ff

#define XSTATE_FP_BIT 0
#define XSTATE_SSE_BIT 1
#define XSTATE_YMM_BIT 2
#define XSTATE_BNDREGS_BIT 3
#define XSTATE_BNDCSR_BIT 4
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
#define XSTATE_XTILE_DATA_BIT 18

#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)

#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK)

#define ESA_FEATURE_ALIGN64_BIT 1
#define ESA_FEATURE_XFD_BIT 2

#define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT)
#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT)


/* CPUID feature bits available in XCR0 */
#define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \
                                XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \
                                XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \
                                XSTATE_ZMM_Hi256_MASK | \
                                XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
                                XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
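/*
 * Illustrative example (editor's note): a guest enabling the x87, SSE and
 * AVX-512 state components would end up with
 *
 *   XCR0 = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK |
 *          XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
 *          XSTATE_Hi16_ZMM_MASK                              (= 0xe7)
 */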

/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX, /* CPUID[1].EDX */
    FEAT_1_ECX, /* CPUID[1].ECX */
    FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
    FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EBX, /* CPUID[8000_0007].EBX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
    FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
    FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
    FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
    FEAT_SVM, /* CPUID[8000_000A].EDX */
    FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX, /* CPUID[6].EAX */
    FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEAT_ARCH_CAPABILITIES,
    FEAT_CORE_CAPABILITY,
    FEAT_PERF_CAPABILITIES,
    FEAT_VMX_PROCBASED_CTLS,
    FEAT_VMX_SECONDARY_CTLS,
    FEAT_VMX_PINBASED_CTLS,
    FEAT_VMX_EXIT_CTLS,
    FEAT_VMX_ENTRY_CTLS,
    FEAT_VMX_MISC,
    FEAT_VMX_EPT_VPID_CAPS,
    FEAT_VMX_BASIC,
    FEAT_VMX_VMFUNC,
    FEAT_14_0_ECX,
    FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */
    FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */
    FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
    FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
    FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
    FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
    FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
    FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
    FEATURE_WORDS,
} FeatureWord;

typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
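/*
 * Illustrative usage sketch (editor's note; the variable names below are
 * only an example): feature words are indexed by FeatureWord, so an SSE2
 * check against some FeatureWordArray could read
 *
 *   FeatureWordArray feats = { 0 };
 *   feats[FEAT_1_EDX] |= CPUID_SSE2;
 *   bool has_sse2 = (feats[FEAT_1_EDX] & CPUID_SSE2) != 0;
 */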

/* cpuid_features bits */
#define CPUID_FP87 (1U << 0)
#define CPUID_VME (1U << 1)
#define CPUID_DE (1U << 2)
#define CPUID_PSE (1U << 3)
#define CPUID_TSC (1U << 4)
#define CPUID_MSR (1U << 5)
#define CPUID_PAE (1U << 6)
#define CPUID_MCE (1U << 7)
#define CPUID_CX8 (1U << 8)
#define CPUID_APIC (1U << 9)
#define CPUID_SEP (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1U << 12)
#define CPUID_PGE (1U << 13)
#define CPUID_MCA (1U << 14)
#define CPUID_CMOV (1U << 15)
#define CPUID_PAT (1U << 16)
#define CPUID_PSE36 (1U << 17)
#define CPUID_PN (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS (1U << 21)
#define CPUID_ACPI (1U << 22)
#define CPUID_MMX (1U << 23)
#define CPUID_FXSR (1U << 24)
#define CPUID_SSE (1U << 25)
#define CPUID_SSE2 (1U << 26)
#define CPUID_SS (1U << 27)
#define CPUID_HT (1U << 28)
#define CPUID_TM (1U << 29)
#define CPUID_IA64 (1U << 30)
#define CPUID_PBE (1U << 31)

#define CPUID_EXT_SSE3 (1U << 0)
#define CPUID_EXT_PCLMULQDQ (1U << 1)
#define CPUID_EXT_DTES64 (1U << 2)
#define CPUID_EXT_MONITOR (1U << 3)
#define CPUID_EXT_DSCPL (1U << 4)
#define CPUID_EXT_VMX (1U << 5)
#define CPUID_EXT_SMX (1U << 6)
#define CPUID_EXT_EST (1U << 7)
#define CPUID_EXT_TM2 (1U << 8)
#define CPUID_EXT_SSSE3 (1U << 9)
#define CPUID_EXT_CID (1U << 10)
#define CPUID_EXT_FMA (1U << 12)
#define CPUID_EXT_CX16 (1U << 13)
#define CPUID_EXT_XTPR (1U << 14)
#define CPUID_EXT_PDCM (1U << 15)
#define CPUID_EXT_PCID (1U << 17)
#define CPUID_EXT_DCA (1U << 18)
#define CPUID_EXT_SSE41 (1U << 19)
#define CPUID_EXT_SSE42 (1U << 20)
#define CPUID_EXT_X2APIC (1U << 21)
#define CPUID_EXT_MOVBE (1U << 22)
#define CPUID_EXT_POPCNT (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES (1U << 25)
#define CPUID_EXT_XSAVE (1U << 26)
#define CPUID_EXT_OSXSAVE (1U << 27)
#define CPUID_EXT_AVX (1U << 28)
#define CPUID_EXT_F16C (1U << 29)
#define CPUID_EXT_RDRAND (1U << 30)
#define CPUID_EXT_HYPERVISOR (1U << 31)

#define CPUID_EXT2_FPU (1U << 0)
#define CPUID_EXT2_VME (1U << 1)
#define CPUID_EXT2_DE (1U << 2)
#define CPUID_EXT2_PSE (1U << 3)
#define CPUID_EXT2_TSC (1U << 4)
#define CPUID_EXT2_MSR (1U << 5)
#define CPUID_EXT2_PAE (1U << 6)
#define CPUID_EXT2_MCE (1U << 7)
#define CPUID_EXT2_CX8 (1U << 8)
#define CPUID_EXT2_APIC (1U << 9)
#define CPUID_EXT2_SYSCALL (1U << 11)
#define CPUID_EXT2_MTRR (1U << 12)
#define CPUID_EXT2_PGE (1U << 13)
#define CPUID_EXT2_MCA (1U << 14)
#define CPUID_EXT2_CMOV (1U << 15)
#define CPUID_EXT2_PAT (1U << 16)
#define CPUID_EXT2_PSE36 (1U << 17)
#define CPUID_EXT2_MP (1U << 19)
#define CPUID_EXT2_NX (1U << 20)
#define CPUID_EXT2_MMXEXT (1U << 22)
#define CPUID_EXT2_MMX (1U << 23)
#define CPUID_EXT2_FXSR (1U << 24)
#define CPUID_EXT2_FFXSR (1U << 25)
#define CPUID_EXT2_PDPE1GB (1U << 26)
#define CPUID_EXT2_RDTSCP (1U << 27)
#define CPUID_EXT2_LM (1U << 29)
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW (1U << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

#define CPUID_EXT3_LAHF_LM (1U << 0)
#define CPUID_EXT3_CMP_LEG (1U << 1)
#define CPUID_EXT3_SVM (1U << 2)
#define CPUID_EXT3_EXTAPIC (1U << 3)
#define CPUID_EXT3_CR8LEG (1U << 4)
#define CPUID_EXT3_ABM (1U << 5)
#define CPUID_EXT3_SSE4A (1U << 6)
#define CPUID_EXT3_MISALIGNSSE (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW (1U << 9)
#define CPUID_EXT3_IBS (1U << 10)
#define CPUID_EXT3_XOP (1U << 11)
#define CPUID_EXT3_SKINIT (1U << 12)
#define CPUID_EXT3_WDT (1U << 13)
#define CPUID_EXT3_LWP (1U << 15)
#define CPUID_EXT3_FMA4 (1U << 16)
#define CPUID_EXT3_TCE (1U << 17)
#define CPUID_EXT3_NODEID (1U << 19)
#define CPUID_EXT3_TBM (1U << 21)
#define CPUID_EXT3_TOPOEXT (1U << 22)
#define CPUID_EXT3_PERFCORE (1U << 23)
#define CPUID_EXT3_PERFNB (1U << 24)

#define CPUID_SVM_NPT (1U << 0)
#define CPUID_SVM_LBRV (1U << 1)
#define CPUID_SVM_SVMLOCK (1U << 2)
#define CPUID_SVM_NRIPSAVE (1U << 3)
#define CPUID_SVM_TSCSCALE (1U << 4)
#define CPUID_SVM_VMCBCLEAN (1U << 5)
#define CPUID_SVM_FLUSHASID (1U << 6)
#define CPUID_SVM_DECODEASSIST (1U << 7)
#define CPUID_SVM_PAUSEFILTER (1U << 10)
#define CPUID_SVM_PFTHRESHOLD (1U << 12)
#define CPUID_SVM_AVIC (1U << 13)
#define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15)
#define CPUID_SVM_VGIF (1U << 16)
#define CPUID_SVM_VNMI (1U << 25)
#define CPUID_SVM_SVME_ADDR_CHK (1U << 28)

/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
/* Support TSC adjust MSR */
#define CPUID_7_0_EBX_TSC_ADJUST (1U << 1)
/* Support SGX */
#define CPUID_7_0_EBX_SGX (1U << 2)
/* 1st Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI1 (1U << 3)
/* Hardware Lock Elision */
#define CPUID_7_0_EBX_HLE (1U << 4)
/* Intel Advanced Vector Extensions 2 */
#define CPUID_7_0_EBX_AVX2 (1U << 5)
/* FPU data pointer updated only on x87 exceptions */
#define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1u << 6)
/* Supervisor-mode Execution Prevention */
#define CPUID_7_0_EBX_SMEP (1U << 7)
/* 2nd Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI2 (1U << 8)
/* Enhanced REP MOVSB/STOSB */
#define CPUID_7_0_EBX_ERMS (1U << 9)
/* Invalidate Process-Context Identifier */
#define CPUID_7_0_EBX_INVPCID (1U << 10)
/* Restricted Transactional Memory */
#define CPUID_7_0_EBX_RTM (1U << 11)
/* Zero out FPU CS and FPU DS */
#define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13)
/* Memory Protection Extension */
#define CPUID_7_0_EBX_MPX (1U << 14)
/* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512F (1U << 16)
/* AVX-512 Doubleword & Quadword Instruction */
#define CPUID_7_0_EBX_AVX512DQ (1U << 17)
/* Read Random SEED */
#define CPUID_7_0_EBX_RDSEED (1U << 18)
/* ADCX and ADOX instructions */
#define CPUID_7_0_EBX_ADX (1U << 19)
/* Supervisor Mode Access Prevention */
#define CPUID_7_0_EBX_SMAP (1U << 20)
/* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_AVX512IFMA (1U << 21)
/* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
/* Cache Line Write Back */
#define CPUID_7_0_EBX_CLWB (1U << 24)
/* Intel Processor Trace */
#define CPUID_7_0_EBX_INTEL_PT (1U << 25)
/* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512PF (1U << 26)
/* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512ER (1U << 27)
/* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_AVX512CD (1U << 28)
/* SHA1/SHA256 Instruction Extensions */
#define CPUID_7_0_EBX_SHA_NI (1U << 29)
/* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512BW (1U << 30)
/* AVX-512 Vector Length Extensions */
#define CPUID_7_0_EBX_AVX512VL (1U << 31)

/* AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI (1U << 1)
/* User-Mode Instruction Prevention */
#define CPUID_7_0_ECX_UMIP (1U << 2)
/* Protection Keys for User-mode Pages */
#define CPUID_7_0_ECX_PKU (1U << 3)
/* OS Enable Protection Keys */
#define CPUID_7_0_ECX_OSPKE (1U << 4)
/* UMONITOR/UMWAIT/TPAUSE Instructions */
#define CPUID_7_0_ECX_WAITPKG (1U << 5)
/* Additional AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6)
/* Galois Field New Instructions */
#define CPUID_7_0_ECX_GFNI (1U << 8)
/* Vector AES Instructions */
#define CPUID_7_0_ECX_VAES (1U << 9)
/* Carry-Less Multiplication Quadword */
#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
/* Vector Neural Network Instructions */
#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
/* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
/* 5-level Page Tables */
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
#define CPUID_7_0_ECX_CLDEMOTE (1U << 25)
/* Move Doubleword as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIRI (1U << 27)
/* Move 64 Bytes as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)
/* Support SGX Launch Control */
#define CPUID_7_0_ECX_SGX_LC (1U << 30)
/* Protection Keys for Supervisor-mode Pages */
#define CPUID_7_0_ECX_PKS (1U << 31)

9185cb287d2SChenyi Qiang /* AVX512 Neural Network Instructions */
9195cb287d2SChenyi Qiang #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
920353f98c9SCathy Zhang /* AVX512 Multiply Accumulation Single Precision */
921353f98c9SCathy Zhang #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
9225dd13f2aSCathy Zhang /* Fast Short Rep Mov */
9235dd13f2aSCathy Zhang #define CPUID_7_0_EDX_FSRM (1U << 4)
924b3c7344eSCathy Zhang /* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
925b3c7344eSCathy Zhang #define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
92610f0abcbSYang Weijiang /* SERIALIZE instruction */
92710f0abcbSYang Weijiang #define CPUID_7_0_EDX_SERIALIZE (1U << 14)
9287eb061b0SWang, Lei /* TSX Suspend Load Address Tracking instruction */
9297eb061b0SWang, Lei #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
93040399ecbSCathy Zhang /* Architectural LBRs */
93140399ecbSCathy Zhang #define CPUID_7_0_EDX_ARCH_LBR (1U << 19)
9321f16764fSJing Liu /* AMX_BF16 instruction */
9331f16764fSJing Liu #define CPUID_7_0_EDX_AMX_BF16 (1U << 22)
9347eb061b0SWang, Lei /* AVX512_FP16 instruction */
9357eb061b0SWang, Lei #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23)
936f2be0bebSTao Xu /* AMX tile (two-dimensional register) */
937f2be0bebSTao Xu #define CPUID_7_0_EDX_AMX_TILE (1U << 24)
9385af514d0SCathy Zhang /* AMX_INT8 instruction */
9395af514d0SCathy Zhang #define CPUID_7_0_EDX_AMX_INT8 (1U << 25)
9400e7e3bf1SEmanuele Giuseppe Esposito /* Speculation Control */
9410e7e3bf1SEmanuele Giuseppe Esposito #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
942f2be0bebSTao Xu /* Single Thread Indirect Branch Predictors */
943f2be0bebSTao Xu #define CPUID_7_0_EDX_STIBP (1U << 27)
944f2be0bebSTao Xu /* Flush L1D cache */
945f2be0bebSTao Xu #define CPUID_7_0_EDX_FLUSH_L1D (1U << 28)
946f2be0bebSTao Xu /* Arch Capabilities */
947f2be0bebSTao Xu #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
948fcf5ef2aSThomas Huth /* Core Capability */
949c1826ea6SYang Zhong #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30)
950c1826ea6SYang Zhong /* Speculative Store Bypass Disable */
951f2be0bebSTao Xu #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)
952f2be0bebSTao Xu
953a9ce107fSJiaxi Chen /* AVX VNNI Instruction */
954a9ce107fSJiaxi Chen #define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
95558794f64SPaolo Bonzini /* AVX512 BFloat16 Instruction */
95658794f64SPaolo Bonzini #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
95758794f64SPaolo Bonzini /* CMPCCXADD Instructions */
95858794f64SPaolo Bonzini #define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
95958794f64SPaolo Bonzini /* Fast Zero REP MOVS */
96058794f64SPaolo Bonzini #define CPUID_7_1_EAX_FZRM (1U << 10)
96199ed8445SJiaxi Chen /* Fast Short REP STOS */
96299ed8445SJiaxi Chen #define CPUID_7_1_EAX_FSRS (1U << 11)
963a957a884SJiaxi Chen /* Fast Short REP CMPS/SCAS */
964a957a884SJiaxi Chen #define CPUID_7_1_EAX_FSRC (1U << 12)
965ba678090SRobert Hoo /* Support Tile Computational Operations on FP16 Numbers */
966ba678090SRobert Hoo #define CPUID_7_1_EAX_AMX_FP16 (1U << 21)
96758794f64SPaolo Bonzini /* Support for VPMADD52[H,L]UQ */
968eaaa197dSJiaxi Chen #define CPUID_7_1_EAX_AVX_IFMA (1U << 23)
969eaaa197dSJiaxi Chen /* Linear Address Masking */
970ecd2e6caSJiaxi Chen #define CPUID_7_1_EAX_LAM (1U << 26)
971ecd2e6caSJiaxi Chen
9723e76bafbSTao Su /* Support for VPDPB[SU,UU,SS]D[,S] */
9733e76bafbSTao Su #define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
974d1a11115SJiaxi Chen /* AVX NE CONVERT Instructions */
975d1a11115SJiaxi Chen #define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5)
976c1acad9fSXin Li /* AMX COMPLEX Instructions */
977c1acad9fSXin Li #define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8)
978c1acad9fSXin Li /* PREFETCHIT0/1 Instructions */
979c1acad9fSXin Li #define CPUID_7_1_EDX_PREFETCHITI (1U << 14)
980c1acad9fSXin Li /* Support for Advanced Vector Extensions 10 */
981c1acad9fSXin Li #define CPUID_7_1_EDX_AVX10 (1U << 19)
982eaaa197dSJiaxi Chen /* Flexible return and event delivery (FRED) */
9839dd8b710STao Su #define CPUID_7_1_EAX_FRED (1U << 17)
9849dd8b710STao Su /* Load into IA32_KERNEL_GS_BASE (LKGS) */
9859dd8b710STao Su #define CPUID_7_1_EAX_LKGS (1U << 18)
986cdec2b75SZeng Guang /* Non-Serializing Write to Model Specific Register (WRMSRNS) */
987cdec2b75SZeng Guang #define CPUID_7_1_EAX_WRMSRNS (1U << 19)
98880db491dSJing Liu
989d1615ea5SLuwei Kang /* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
990d1615ea5SLuwei Kang #define CPUID_7_2_EDX_MCDT_NO (1U << 5)
991d1615ea5SLuwei Kang
9922ba8b7eeSJohn Allen /* XFD Extend Feature Disabled */
9931ea14321SJohn Allen #define CPUID_D_1_EAX_XFD (1U << 4)
9942ba8b7eeSJohn Allen
9952ba8b7eeSJohn Allen /* Packets which contain IP payload have LIP values */
996f2be0bebSTao Xu #define CPUID_14_0_ECX_LIP (1U << 31)
997f2be0bebSTao Xu
998f2be0bebSTao Xu /* AVX10 128-bit vector support is present */
999f2be0bebSTao Xu #define CPUID_24_0_EBX_AVX10_128 (1U << 16)
1000f2be0bebSTao Xu /* AVX10 256-bit vector support is present */
1001f2be0bebSTao Xu #define CPUID_24_0_EBX_AVX10_256 (1U << 17)
1002f2be0bebSTao Xu /* AVX10 512-bit vector support is present */
1003f2be0bebSTao Xu #define CPUID_24_0_EBX_AVX10_512 (1U << 18)
1004623972ceSBabu Moger /* AVX10 vector length support mask */
1005623972ceSBabu Moger #define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \
1006143c30d4SMoger, Babu CPUID_24_0_EBX_AVX10_256 | \
1007143c30d4SMoger, Babu CPUID_24_0_EBX_AVX10_512)
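/*
 * Illustrative sketch only (not an existing QEMU helper): given the EBX value
 * of CPUID leaf 0x24 sub-leaf 0, the supported AVX10 vector lengths can be
 * tested against the bits above, e.g.:
 *
 *     bool has_256 = ebx & CPUID_24_0_EBX_AVX10_256;
 *     bool has_any = ebx & CPUID_24_0_EBX_AVX10_VL_MASK;
 */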
1008bb039a23SBabu Moger
1009bb039a23SBabu Moger /* RAS Features */
1010623972ceSBabu Moger #define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0)
1011623972ceSBabu Moger #define CPUID_8000_0007_EBX_SUCCOR (1U << 1)
1012188569c1SPaolo Bonzini
1013188569c1SPaolo Bonzini /* CLZERO instruction */
1014bb039a23SBabu Moger #define CPUID_8000_0008_EBX_CLZERO (1U << 0)
1015bb039a23SBabu Moger /* Always save/restore FP error pointers */
10161b3420e1SEduardo Habkost #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2)
1017b70eec31SBabu Moger /* Write back and do not invalidate cache */
1018b70eec31SBabu Moger #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)
1019b70eec31SBabu Moger /* Indirect Branch Prediction Barrier */
1020b70eec31SBabu Moger #define CPUID_8000_0008_EBX_IBPB (1U << 12)
1021b70eec31SBabu Moger /* Indirect Branch Restricted Speculation */
1022b70eec31SBabu Moger #define CPUID_8000_0008_EBX_IBRS (1U << 14)
102362a798d4SBabu Moger /* Single Thread Indirect Branch Predictors */
102462a798d4SBabu Moger #define CPUID_8000_0008_EBX_STIBP (1U << 15)
1025b70eec31SBabu Moger /* STIBP mode has enhanced performance and may be left always on */
1026fcf5ef2aSThomas Huth #define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17)
1027fcf5ef2aSThomas Huth /* Speculative Store Bypass Disable */
1028fcf5ef2aSThomas Huth #define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24)
1029fcf5ef2aSThomas Huth /* Paravirtualized Speculative Store Bypass Disable MSR */
1030fcf5ef2aSThomas Huth #define CPUID_8000_0008_EBX_VIRT_SSBD (1U << 25)
1031fcf5ef2aSThomas Huth /* Predictive Store Forwarding Disable */
1032fcf5ef2aSThomas Huth #define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)
1033fcf5ef2aSThomas Huth
1034fcf5ef2aSThomas Huth /* Processor ignores nested data breakpoints */
1035fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
1036fcf5ef2aSThomas Huth /* LFENCE is always serializing */
1037fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
1038fcf5ef2aSThomas Huth /* Null Selector Clears Base */
1039fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
1040fcf5ef2aSThomas Huth /* Automatic IBRS */
1041fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
1042fcf5ef2aSThomas Huth /* Enhanced Return Address Predictor Security */
1043fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_ERAPS (1U << 24)
1044fcf5ef2aSThomas Huth /* Selective Branch Predictor Barrier */
1045fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_SBPB (1U << 27)
1046fcf5ef2aSThomas Huth /* IBPB includes branch type prediction flushing */
1047fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28)
1048fcf5ef2aSThomas Huth /* Not vulnerable to Speculative Return Stack Overflow */
1049fcf5ef2aSThomas Huth #define CPUID_8000_0021_EAX_SRSO_NO (1U << 29)
10508d031cecSPu Wen /* Not vulnerable to SRSO at the user-kernel boundary */
10518d031cecSPu Wen #define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30)
105218ab37baSLiran Alon
105318ab37baSLiran Alon /*
105418ab37baSLiran Alon * Return Address Predictor size. RapSize x 8 is the minimum number of
105518ab37baSLiran Alon * CALL instructions software needs to execute to flush the RAP.
105618ab37baSLiran Alon */
105718ab37baSLiran Alon #define CPUID_8000_0021_EBX_RAPSIZE (8U << 16)
105818ab37baSLiran Alon
1059fcf5ef2aSThomas Huth /* Performance Monitoring Version 2 */
1060fcf5ef2aSThomas Huth #define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0)
1061fcf5ef2aSThomas Huth
1062fcf5ef2aSThomas Huth #define CPUID_XSAVE_XSAVEOPT (1U << 0)
10630f6ed7baSZhao Liu #define CPUID_XSAVE_XSAVEC (1U << 1)
10640f6ed7baSZhao Liu #define CPUID_XSAVE_XGETBV1 (1U << 2)
10650f6ed7baSZhao Liu #define CPUID_XSAVE_XSAVES (1U << 3)
10660f6ed7baSZhao Liu
10670f6ed7baSZhao Liu #define CPUID_6_EAX_ARAT (1U << 2)
10680f6ed7baSZhao Liu
10690f6ed7baSZhao Liu /* CPUID[0x80000007].EDX flags: */
10700f6ed7baSZhao Liu #define CPUID_APM_INVTSC (1U << 8)
10715304873aSZhao Liu
10720f6ed7baSZhao Liu #define CPUID_VENDOR_SZ 12
1073fcf5ef2aSThomas Huth
1074d86f9636SRobert Hoo #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
1075d86f9636SRobert Hoo #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
1076d86f9636SRobert Hoo #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
1077d86f9636SRobert Hoo #define CPUID_VENDOR_INTEL "GenuineIntel"
1078d86f9636SRobert Hoo
1079d86f9636SRobert Hoo #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
108077b168d2SCathy Zhang #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
10816c997b4aSXiaoyao Li #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
10826c997b4aSXiaoyao Li #define CPUID_VENDOR_AMD "AuthenticAMD"
10836c997b4aSXiaoyao Li
10846c43ec3bSTao Su #define CPUID_VENDOR_VIA "CentaurHauls"
10856c43ec3bSTao Su
10866c43ec3bSTao Su #define CPUID_VENDOR_HYGON "HygonGenuine"
108722e1094cSEmanuele Giuseppe Esposito
10886c43ec3bSTao Su #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
1089d86f9636SRobert Hoo (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
1090597360c0SXiaoyao Li (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
1091597360c0SXiaoyao Li #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
1092704798adSPaolo Bonzini (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
1093704798adSPaolo Bonzini (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
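/*
 * The *_1/_2/_3 words above are the 12-character vendor string packed into
 * EBX/EDX/ECX as little-endian 32-bit values, e.g. "Genu" is
 * 'G' | 'e' << 8 | 'n' << 16 | 'u' << 24 = 0x756e6547. Illustrative use of
 * the helper macros (not a specific QEMU code path):
 *
 *     if (IS_AMD_CPU(env)) {
 *         ... apply AMD-specific CPUID handling ...
 *     }
 */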
1094704798adSPaolo Bonzini
1095704798adSPaolo Bonzini #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
1096704798adSPaolo Bonzini #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
1097704798adSPaolo Bonzini
1098704798adSPaolo Bonzini /* CPUID[0xB].ECX level types */
10990c49c918SPaolo Bonzini #define CPUID_B_ECX_TOPO_LEVEL_INVALID 0
1100ef202d64SXin Li #define CPUID_B_ECX_TOPO_LEVEL_SMT 1
1101704798adSPaolo Bonzini #define CPUID_B_ECX_TOPO_LEVEL_CORE 2
1102704798adSPaolo Bonzini
1103704798adSPaolo Bonzini /* CPUID[0x1F].ECX level types */
1104704798adSPaolo Bonzini #define CPUID_1F_ECX_TOPO_LEVEL_INVALID CPUID_B_ECX_TOPO_LEVEL_INVALID
1105704798adSPaolo Bonzini #define CPUID_1F_ECX_TOPO_LEVEL_SMT CPUID_B_ECX_TOPO_LEVEL_SMT
1106704798adSPaolo Bonzini #define CPUID_1F_ECX_TOPO_LEVEL_CORE CPUID_B_ECX_TOPO_LEVEL_CORE
1107704798adSPaolo Bonzini #define CPUID_1F_ECX_TOPO_LEVEL_MODULE 3
1108704798adSPaolo Bonzini #define CPUID_1F_ECX_TOPO_LEVEL_DIE 5
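/*
 * These numeric values are what is reported in ECX[15:8] ("level type") of
 * the corresponding CPUID.0BH / CPUID.1FH sub-leaf when CPU topology is
 * enumerated.
 */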
1109704798adSPaolo Bonzini
1110704798adSPaolo Bonzini /* MSR Feature Bits */
1111704798adSPaolo Bonzini #define MSR_ARCH_CAP_RDCL_NO (1U << 0)
1112704798adSPaolo Bonzini #define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
1113704798adSPaolo Bonzini #define MSR_ARCH_CAP_RSBA (1U << 2)
1114704798adSPaolo Bonzini #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
1115704798adSPaolo Bonzini #define MSR_ARCH_CAP_SSB_NO (1U << 4)
1116704798adSPaolo Bonzini #define MSR_ARCH_CAP_MDS_NO (1U << 5)
1117704798adSPaolo Bonzini #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
1118704798adSPaolo Bonzini #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
1119704798adSPaolo Bonzini #define MSR_ARCH_CAP_TAA_NO (1U << 8)
1120704798adSPaolo Bonzini #define MSR_ARCH_CAP_SBDR_SSDP_NO (1U << 13)
1121704798adSPaolo Bonzini #define MSR_ARCH_CAP_FBSDP_NO (1U << 14)
1122704798adSPaolo Bonzini #define MSR_ARCH_CAP_PSDP_NO (1U << 15)
1123704798adSPaolo Bonzini #define MSR_ARCH_CAP_FB_CLEAR (1U << 17)
1124704798adSPaolo Bonzini #define MSR_ARCH_CAP_PBRSB_NO (1U << 24)
1125704798adSPaolo Bonzini
1126704798adSPaolo Bonzini #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
1127704798adSPaolo Bonzini
1128704798adSPaolo Bonzini /* VMX MSR features */
1129704798adSPaolo Bonzini #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull
1130704798adSPaolo Bonzini #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32)
1131704798adSPaolo Bonzini #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32)
1132704798adSPaolo Bonzini #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49)
1133704798adSPaolo Bonzini #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
1134704798adSPaolo Bonzini #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)
1135704798adSPaolo Bonzini #define MSR_VMX_BASIC_ANY_ERRCODE (1ULL << 56)
1136704798adSPaolo Bonzini #define MSR_VMX_BASIC_NESTED_EXCEPTION (1ULL << 58)
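/*
 * Illustrative sketch only: unlike the single-bit flags, the *_MASK values
 * above extract numeric fields from the IA32_VMX_BASIC MSR, e.g.:
 *
 *     uint32_t vmcs_rev = basic & MSR_VMX_BASIC_VMCS_REVISION_MASK;
 *     bool true_ctls    = basic & MSR_VMX_BASIC_TRUE_CTLS;
 */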
1137704798adSPaolo Bonzini
1138704798adSPaolo Bonzini #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
1139704798adSPaolo Bonzini #define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
1140704798adSPaolo Bonzini #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6)
1141704798adSPaolo Bonzini #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7)
1142704798adSPaolo Bonzini #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8)
1143704798adSPaolo Bonzini #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull
1144704798adSPaolo Bonzini #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29)
1145704798adSPaolo Bonzini #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30)
1146704798adSPaolo Bonzini
1147704798adSPaolo Bonzini #define MSR_VMX_EPT_EXECONLY (1ULL << 0)
1148704798adSPaolo Bonzini #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6)
1149704798adSPaolo Bonzini #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7)
1150704798adSPaolo Bonzini #define MSR_VMX_EPT_UC (1ULL << 8)
1151704798adSPaolo Bonzini #define MSR_VMX_EPT_WB (1ULL << 14)
1152704798adSPaolo Bonzini #define MSR_VMX_EPT_2MB (1ULL << 16)
1153704798adSPaolo Bonzini #define MSR_VMX_EPT_1GB (1ULL << 17)
1154704798adSPaolo Bonzini #define MSR_VMX_EPT_INVEPT (1ULL << 20)
1155704798adSPaolo Bonzini #define MSR_VMX_EPT_AD_BITS (1ULL << 21)
1156704798adSPaolo Bonzini #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22)
1157704798adSPaolo Bonzini #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25)
1158704798adSPaolo Bonzini #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26)
1159704798adSPaolo Bonzini #define MSR_VMX_EPT_INVVPID (1ULL << 32)
1160704798adSPaolo Bonzini #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40)
1161704798adSPaolo Bonzini #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41)
1162704798adSPaolo Bonzini #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42)
1163704798adSPaolo Bonzini #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)
1164704798adSPaolo Bonzini
1165704798adSPaolo Bonzini #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0)
1166704798adSPaolo Bonzini
1167704798adSPaolo Bonzini
1168704798adSPaolo Bonzini /* VMX controls */
1169704798adSPaolo Bonzini #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
1170704798adSPaolo Bonzini #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008
1171704798adSPaolo Bonzini #define VMX_CPU_BASED_HLT_EXITING 0x00000080
1172704798adSPaolo Bonzini #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200
1173704798adSPaolo Bonzini #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400
11749ce8af4dSPaolo Bonzini #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800
117533cc8826SAke Koomsin #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000
1176704798adSPaolo Bonzini #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000
1177704798adSPaolo Bonzini #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000
1178704798adSPaolo Bonzini #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000
1179704798adSPaolo Bonzini #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000
1180704798adSPaolo Bonzini #define VMX_CPU_BASED_TPR_SHADOW 0x00200000
1181704798adSPaolo Bonzini #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
1182704798adSPaolo Bonzini #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000
1183704798adSPaolo Bonzini #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000
1184704798adSPaolo Bonzini #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000
1185704798adSPaolo Bonzini #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
1186704798adSPaolo Bonzini #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000
1187704798adSPaolo Bonzini #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000
1188704798adSPaolo Bonzini #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000
1189704798adSPaolo Bonzini #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
1190704798adSPaolo Bonzini
1191704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
1192704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002
1193704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_DESC 0x00000004
1194704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008
119552a44ad2SChenyi Qiang #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
1196704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020
1197704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040
1198704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
1199704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
1200704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
1201704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
1202704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800
1203704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
1204704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
1205704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000
1206704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000
120752a44ad2SChenyi Qiang #define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000
1208704798adSPaolo Bonzini #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000
12092d384d7cSVitaly Kuznetsov #define VMX_SECONDARY_EXEC_XSAVES 0x00100000
12102d384d7cSVitaly Kuznetsov #define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000
12112d384d7cSVitaly Kuznetsov #define VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE 0x04000000
12122d384d7cSVitaly Kuznetsov
12132d384d7cSVitaly Kuznetsov #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001
12142d384d7cSVitaly Kuznetsov #define VMX_PIN_BASED_NMI_EXITING 0x00000008
12152d384d7cSVitaly Kuznetsov #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020
12162d384d7cSVitaly Kuznetsov #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
12172d384d7cSVitaly Kuznetsov #define VMX_PIN_BASED_POSTED_INTR 0x00000080
12182d384d7cSVitaly Kuznetsov
12192d384d7cSVitaly Kuznetsov #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
12202d384d7cSVitaly Kuznetsov #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
12212d384d7cSVitaly Kuznetsov #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
12222d384d7cSVitaly Kuznetsov #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
12232d384d7cSVitaly Kuznetsov #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000
1224128531d9SVitaly Kuznetsov #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000
1225e1f9a8e8SVitaly Kuznetsov #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000
122673d24074SJon Doron #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000
1227869840d2SVitaly Kuznetsov #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
12289411e8b6SVitaly Kuznetsov #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000
1229aa6bb5faSVitaly Kuznetsov #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
12303aae0854SVitaly Kuznetsov #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
12312d384d7cSVitaly Kuznetsov #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000
1232f701c082SVitaly Kuznetsov #define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000
1233f701c082SVitaly Kuznetsov
1234fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
1235fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_IA32E_MODE 0x00000200
1236fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_SMM 0x00000400
1237fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
1238fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
1239fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000
1240fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000
1241fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000
1242fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000
1243fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
1244fcf5ef2aSThomas Huth #define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000
1245fcf5ef2aSThomas Huth
1246fcf5ef2aSThomas Huth /* Supported Hyper-V Enlightenments */
1247fcf5ef2aSThomas Huth #define HYPERV_FEAT_RELAXED 0
1248fcf5ef2aSThomas Huth #define HYPERV_FEAT_VAPIC 1
1249fcf5ef2aSThomas Huth #define HYPERV_FEAT_TIME 2
1250fcf5ef2aSThomas Huth #define HYPERV_FEAT_CRASH 3
1251fcf5ef2aSThomas Huth #define HYPERV_FEAT_RESET 4
1252fcf5ef2aSThomas Huth #define HYPERV_FEAT_VPINDEX 5
1253fcf5ef2aSThomas Huth #define HYPERV_FEAT_RUNTIME 6
1254fcf5ef2aSThomas Huth #define HYPERV_FEAT_SYNIC 7
125562846089SRichard Henderson #define HYPERV_FEAT_STIMER 8
125662846089SRichard Henderson #define HYPERV_FEAT_FREQUENCIES 9
1257b26491b4SRichard Henderson #define HYPERV_FEAT_REENLIGHTENMENT 10
1258fcf5ef2aSThomas Huth #define HYPERV_FEAT_TLBFLUSH 11
1259fcf5ef2aSThomas Huth #define HYPERV_FEAT_EVMCS 12
1260fcf5ef2aSThomas Huth #define HYPERV_FEAT_IPI 13
1261fcf5ef2aSThomas Huth #define HYPERV_FEAT_STIMER_DIRECT 14
1262fcf5ef2aSThomas Huth #define HYPERV_FEAT_AVIC 15
1263fcf5ef2aSThomas Huth #define HYPERV_FEAT_SYNDBG 16
1264fcf5ef2aSThomas Huth #define HYPERV_FEAT_MSR_BITMAP 17
1265fcf5ef2aSThomas Huth #define HYPERV_FEAT_XMM_INPUT 18
1266fcf5ef2aSThomas Huth #define HYPERV_FEAT_TLBFLUSH_EXT 19
1267fcf5ef2aSThomas Huth #define HYPERV_FEAT_TLBFLUSH_DIRECT 20
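/*
 * These are bit positions, not masks: the hyperv_features bitmap in struct
 * ArchCPU (declared later in this header) is indexed by them, so a hedged
 * sketch of a feature test is simply:
 *
 *     bool synic = cpu->hyperv_features & (1ULL << HYPERV_FEAT_SYNIC);
 */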
1268fcf5ef2aSThomas Huth
1269fcf5ef2aSThomas Huth #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY
1270fcf5ef2aSThomas Huth #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF
1271cc155f19SPaolo Bonzini #endif
1272cc155f19SPaolo Bonzini
1273fcf5ef2aSThomas Huth #define EXCP00_DIVZ 0
1274fcf5ef2aSThomas Huth #define EXCP01_DB 1
1275fcf5ef2aSThomas Huth #define EXCP02_NMI 2
1276fcf5ef2aSThomas Huth #define EXCP03_INT3 3
1277fcf5ef2aSThomas Huth #define EXCP04_INTO 4
1278fcf5ef2aSThomas Huth #define EXCP05_BOUND 5
1279fcf5ef2aSThomas Huth #define EXCP06_ILLOP 6
1280fcf5ef2aSThomas Huth #define EXCP07_PREX 7
1281fcf5ef2aSThomas Huth #define EXCP08_DBLE 8
1282fcf5ef2aSThomas Huth #define EXCP09_XERR 9
1283cc155f19SPaolo Bonzini #define EXCP0A_TSS 10
1284cc155f19SPaolo Bonzini #define EXCP0B_NOSEG 11
1285cc155f19SPaolo Bonzini #define EXCP0C_STACK 12
1286460231adSPaolo Bonzini #define EXCP0D_GPF 13
1287fcf5ef2aSThomas Huth #define EXCP0E_PAGE 14
1288fcf5ef2aSThomas Huth #define EXCP10_COPR 16
1289fcf5ef2aSThomas Huth #define EXCP11_ALGN 17
1290fcf5ef2aSThomas Huth #define EXCP12_MCHK 18
1291fcf5ef2aSThomas Huth
1292fcf5ef2aSThomas Huth #define EXCP_VMEXIT 0x100 /* only for system emulation */
1293fcf5ef2aSThomas Huth #define EXCP_SYSCALL 0x101 /* only for user emulation */
1294fcf5ef2aSThomas Huth #define EXCP_VSYSCALL 0x102 /* only for user emulation */
1295fcf5ef2aSThomas Huth
1296fcf5ef2aSThomas Huth /* i386-specific interrupt pending bits. */
1297fcf5ef2aSThomas Huth #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
1298fcf5ef2aSThomas Huth #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
1299fcf5ef2aSThomas Huth #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
1300fcf5ef2aSThomas Huth #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
1301fcf5ef2aSThomas Huth #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
1302fcf5ef2aSThomas Huth #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
1303fcf5ef2aSThomas Huth #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
1304fcf5ef2aSThomas Huth
1305fcf5ef2aSThomas Huth /* Use a clearer name for this. */
1306fcf5ef2aSThomas Huth #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
1307fcf5ef2aSThomas Huth
1308fcf5ef2aSThomas Huth #define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX)
1309fcf5ef2aSThomas Huth
1310fcf5ef2aSThomas Huth /* Instead of computing the condition codes after each x86 instruction,
1311fcf5ef2aSThomas Huth * QEMU just stores one operand (called CC_SRC), the result
1312fcf5ef2aSThomas Huth * (called CC_DST) and the type of operation (called CC_OP). When the
1313fcf5ef2aSThomas Huth * condition codes are needed, the condition codes can be calculated
1314fcf5ef2aSThomas Huth * using this information. Condition codes are not generated if they
1315fcf5ef2aSThomas Huth * are only needed for conditional branches.
1316fcf5ef2aSThomas Huth */
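/*
 * Worked example (illustrative only): after a 32-bit ADD the translator
 * leaves CC_OP = CC_OP_ADDL, CC_DST = result and CC_SRC = first source
 * operand. If a later instruction asks for the flags, they can be recomputed
 * on demand, e.g.:
 *
 *     zf = (uint32_t)CC_DST == 0;
 *     cf = (uint32_t)CC_DST < (uint32_t)CC_SRC;    (carry out of the add)
 */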
1317fcf5ef2aSThomas Huth typedef enum {
1318fcf5ef2aSThomas Huth CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */
1319fcf5ef2aSThomas Huth CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */
1320fcf5ef2aSThomas Huth CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */
1321fcf5ef2aSThomas Huth CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
1322fcf5ef2aSThomas Huth
1323fcf5ef2aSThomas Huth /* Low 2 bits = MemOp constant for the size */
1324fcf5ef2aSThomas Huth #define CC_OP_FIRST_BWLQ CC_OP_MULB
1325fcf5ef2aSThomas Huth CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */
1326fcf5ef2aSThomas Huth CC_OP_MULW,
1327fcf5ef2aSThomas Huth CC_OP_MULL,
1328fcf5ef2aSThomas Huth CC_OP_MULQ,
1329fcf5ef2aSThomas Huth
1330fcf5ef2aSThomas Huth CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1331fcf5ef2aSThomas Huth CC_OP_ADDW,
1332fcf5ef2aSThomas Huth CC_OP_ADDL,
1333fcf5ef2aSThomas Huth CC_OP_ADDQ,
1334fcf5ef2aSThomas Huth
1335fcf5ef2aSThomas Huth CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1336fcf5ef2aSThomas Huth CC_OP_ADCW,
1337fcf5ef2aSThomas Huth CC_OP_ADCL,
1338fcf5ef2aSThomas Huth CC_OP_ADCQ,
1339fcf5ef2aSThomas Huth
1340fcf5ef2aSThomas Huth CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1341fcf5ef2aSThomas Huth CC_OP_SUBW,
1342fcf5ef2aSThomas Huth CC_OP_SUBL,
134383a3a20eSRichard Henderson CC_OP_SUBQ,
134483a3a20eSRichard Henderson
134583a3a20eSRichard Henderson CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
134683a3a20eSRichard Henderson CC_OP_SBBW,
134783a3a20eSRichard Henderson CC_OP_SBBL,
1348460231adSPaolo Bonzini CC_OP_SBBQ,
1349460231adSPaolo Bonzini
1350460231adSPaolo Bonzini CC_OP_LOGICB, /* modify all flags, CC_DST = res */
1351460231adSPaolo Bonzini CC_OP_LOGICW,
1352460231adSPaolo Bonzini CC_OP_LOGICL,
1353460231adSPaolo Bonzini CC_OP_LOGICQ,
1354460231adSPaolo Bonzini
1355460231adSPaolo Bonzini CC_OP_INCB, /* modify all flags except CF, CC_DST = res, CC_SRC = C */
1356460231adSPaolo Bonzini CC_OP_INCW,
1357460231adSPaolo Bonzini CC_OP_INCL,
1358fcf5ef2aSThomas Huth CC_OP_INCQ,
1359fcf5ef2aSThomas Huth
1360fcf5ef2aSThomas Huth CC_OP_DECB, /* modify all flags except CF, CC_DST = res, CC_SRC = C */
1361e7bbb7cbSPaolo Bonzini CC_OP_DECW,
1362fcf5ef2aSThomas Huth CC_OP_DECL,
1363fcf5ef2aSThomas Huth CC_OP_DECQ,
1364fcf5ef2aSThomas Huth
1365fcf5ef2aSThomas Huth CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
1366fcf5ef2aSThomas Huth CC_OP_SHLW,
1367fcf5ef2aSThomas Huth CC_OP_SHLL,
1368fcf5ef2aSThomas Huth CC_OP_SHLQ,
1369fcf5ef2aSThomas Huth
137075f107a8SRichard Henderson CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
137175f107a8SRichard Henderson CC_OP_SARW,
137275f107a8SRichard Henderson CC_OP_SARL,
137375f107a8SRichard Henderson CC_OP_SARQ,
137475f107a8SRichard Henderson
137575f107a8SRichard Henderson CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
137675f107a8SRichard Henderson CC_OP_BMILGW,
137775f107a8SRichard Henderson CC_OP_BMILGL,
1378fcf5ef2aSThomas Huth CC_OP_BMILGQ,
137975f107a8SRichard Henderson
138075f107a8SRichard Henderson CC_OP_BLSIB, /* Z,S via CC_DST, C = SRC!=0; O=0; P,A undefined */
138175f107a8SRichard Henderson CC_OP_BLSIW,
138275f107a8SRichard Henderson CC_OP_BLSIL,
138375f107a8SRichard Henderson CC_OP_BLSIQ,
138475f107a8SRichard Henderson
138575f107a8SRichard Henderson /*
138675f107a8SRichard Henderson * Note that only CC_OP_POPCNT (i.e. the one with MO_TL size)
138775f107a8SRichard Henderson * is used or implemented, because the translation needs
138875f107a8SRichard Henderson * to zero-extend CC_DST anyway.
138975f107a8SRichard Henderson */
139075f107a8SRichard Henderson CC_OP_POPCNTB__, /* Z via CC_DST, all other flags clear. */
139175f107a8SRichard Henderson CC_OP_POPCNTW__,
139275f107a8SRichard Henderson CC_OP_POPCNTL__,
1393cf5ec664SPaolo Bonzini CC_OP_POPCNTQ__,
139475f107a8SRichard Henderson CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__,
139575f107a8SRichard Henderson #define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__
139675f107a8SRichard Henderson
139775f107a8SRichard Henderson CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
139875f107a8SRichard Henderson } CCOp;
1399fcf5ef2aSThomas Huth
1400fcf5ef2aSThomas Huth /* See X86DecodedInsn.cc_op, using int8_t. */
1401fcf5ef2aSThomas Huth QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX);
1402fcf5ef2aSThomas Huth
1403fcf5ef2aSThomas Huth static inline MemOp cc_op_size(CCOp op)
1404fcf5ef2aSThomas Huth {
1405fcf5ef2aSThomas Huth MemOp size = op & 3;
1406fcf5ef2aSThomas Huth
1407fcf5ef2aSThomas Huth QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3);
1408fcf5ef2aSThomas Huth assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ);
1409fcf5ef2aSThomas Huth assert(size <= MO_TL);
1410fcf5ef2aSThomas Huth
1411fcf5ef2aSThomas Huth return size;
1412fcf5ef2aSThomas Huth }
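/*
 * For example, cc_op_size(CC_OP_ADDW) == MO_16: the BWLQ groups are laid out
 * so that the operand size sits in the two low bits of the CC_OP value,
 * which is what the build-time assertion on CC_OP_FIRST_BWLQ above checks.
 */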
1413fcf5ef2aSThomas Huth
1414e03b5686SMarc-André Lureau typedef struct SegmentCache {
1415fcf5ef2aSThomas Huth uint32_t selector;
1416fcf5ef2aSThomas Huth target_ulong base;
1417fcf5ef2aSThomas Huth uint32_t limit;
1418cf5ec664SPaolo Bonzini uint32_t flags;
1419fcf5ef2aSThomas Huth } SegmentCache;
1420fcf5ef2aSThomas Huth
1421fcf5ef2aSThomas Huth typedef union MMXReg {
142275f107a8SRichard Henderson uint8_t _b_MMXReg[64 / 8];
142375f107a8SRichard Henderson uint16_t _w_MMXReg[64 / 16];
142475f107a8SRichard Henderson uint32_t _l_MMXReg[64 / 32];
142575f107a8SRichard Henderson uint64_t _q_MMXReg[64 / 64];
142675f107a8SRichard Henderson float32 _s_MMXReg[64 / 32];
142775f107a8SRichard Henderson float64 _d_MMXReg[64 / 64];
142875f107a8SRichard Henderson } MMXReg;
1429fcf5ef2aSThomas Huth
1430fcf5ef2aSThomas Huth typedef union XMMReg {
1431fcf5ef2aSThomas Huth uint64_t _q_XMMReg[128 / 64];
1432fcf5ef2aSThomas Huth } XMMReg;
1433fcf5ef2aSThomas Huth
1434fcf5ef2aSThomas Huth typedef union YMMReg {
1435fcf5ef2aSThomas Huth uint64_t _q_YMMReg[256 / 64];
1436fcf5ef2aSThomas Huth XMMReg _x_YMMReg[256 / 128];
1437fcf5ef2aSThomas Huth } YMMReg;
1438cf5ec664SPaolo Bonzini
1439fcf5ef2aSThomas Huth typedef union ZMMReg {
1440fcf5ef2aSThomas Huth uint8_t _b_ZMMReg[512 / 8];
1441fcf5ef2aSThomas Huth uint16_t _w_ZMMReg[512 / 16];
144275f107a8SRichard Henderson uint32_t _l_ZMMReg[512 / 32];
144375f107a8SRichard Henderson uint64_t _q_ZMMReg[512 / 64];
144475f107a8SRichard Henderson float16 _h_ZMMReg[512 / 16];
144575f107a8SRichard Henderson float32 _s_ZMMReg[512 / 32];
144675f107a8SRichard Henderson float64 _d_ZMMReg[512 / 64];
144775f107a8SRichard Henderson XMMReg _x_ZMMReg[512 / 128];
144875f107a8SRichard Henderson YMMReg _y_ZMMReg[512 / 256];
1449fcf5ef2aSThomas Huth } ZMMReg;
1450fcf5ef2aSThomas Huth
1451fcf5ef2aSThomas Huth typedef struct BNDReg {
1452fcf5ef2aSThomas Huth uint64_t lb;
1453fcf5ef2aSThomas Huth uint64_t ub;
1454fcf5ef2aSThomas Huth } BNDReg;
1455fcf5ef2aSThomas Huth
1456fcf5ef2aSThomas Huth typedef struct BNDCSReg {
1457fcf5ef2aSThomas Huth uint64_t cfgu;
1458fcf5ef2aSThomas Huth uint64_t sts;
1459fcf5ef2aSThomas Huth } BNDCSReg;
1460fcf5ef2aSThomas Huth
1461fcf5ef2aSThomas Huth #define BNDCFG_ENABLE 1ULL
1462fcf5ef2aSThomas Huth #define BNDCFG_BNDPRESERVE 2ULL
1463fcf5ef2aSThomas Huth #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
1464fcf5ef2aSThomas Huth
1465fcf5ef2aSThomas Huth #if HOST_BIG_ENDIAN
1466fcf5ef2aSThomas Huth #define ZMM_B(n) _b_ZMMReg[63 - (n)]
1467fcf5ef2aSThomas Huth #define ZMM_W(n) _w_ZMMReg[31 - (n)]
1468fcf5ef2aSThomas Huth #define ZMM_L(n) _l_ZMMReg[15 - (n)]
1469fcf5ef2aSThomas Huth #define ZMM_H(n) _h_ZMMReg[31 - (n)]
1470fcf5ef2aSThomas Huth #define ZMM_S(n) _s_ZMMReg[15 - (n)]
1471fcf5ef2aSThomas Huth #define ZMM_Q(n) _q_ZMMReg[7 - (n)]
1472fcf5ef2aSThomas Huth #define ZMM_D(n) _d_ZMMReg[7 - (n)]
1473fcf5ef2aSThomas Huth #define ZMM_X(n) _x_ZMMReg[3 - (n)]
1474fcf5ef2aSThomas Huth #define ZMM_Y(n) _y_ZMMReg[1 - (n)]
1475fcf5ef2aSThomas Huth
1476fcf5ef2aSThomas Huth #define XMM_Q(n) _q_XMMReg[1 - (n)]
1477fcf5ef2aSThomas Huth
1478fcf5ef2aSThomas Huth #define YMM_Q(n) _q_YMMReg[3 - (n)]
1479fcf5ef2aSThomas Huth #define YMM_X(n) _x_YMMReg[1 - (n)]
1480fcf5ef2aSThomas Huth
1481fcf5ef2aSThomas Huth #define MMX_B(n) _b_MMXReg[7 - (n)]
1482fcf5ef2aSThomas Huth #define MMX_W(n) _w_MMXReg[3 - (n)]
1483fcf5ef2aSThomas Huth #define MMX_L(n) _l_MMXReg[1 - (n)]
1484fcf5ef2aSThomas Huth #define MMX_S(n) _s_MMXReg[1 - (n)]
1485fcf5ef2aSThomas Huth #else
1486fcf5ef2aSThomas Huth #define ZMM_B(n) _b_ZMMReg[n]
1487fcf5ef2aSThomas Huth #define ZMM_W(n) _w_ZMMReg[n]
14886dba8b47SRichard Henderson #define ZMM_L(n) _l_ZMMReg[n]
1489fcf5ef2aSThomas Huth #define ZMM_H(n) _h_ZMMReg[n]
1490fcf5ef2aSThomas Huth #define ZMM_S(n) _s_ZMMReg[n]
1491fcf5ef2aSThomas Huth #define ZMM_Q(n) _q_ZMMReg[n]
1492fcf5ef2aSThomas Huth #define ZMM_D(n) _d_ZMMReg[n]
1493fcf5ef2aSThomas Huth #define ZMM_X(n) _x_ZMMReg[n]
14946dba8b47SRichard Henderson #define ZMM_Y(n) _y_ZMMReg[n]
14956dba8b47SRichard Henderson
1496fcf5ef2aSThomas Huth #define XMM_Q(n) _q_XMMReg[n]
1497fcf5ef2aSThomas Huth
14986dba8b47SRichard Henderson #define YMM_Q(n) _q_YMMReg[n]
14996dba8b47SRichard Henderson #define YMM_X(n) _x_YMMReg[n]
15006dba8b47SRichard Henderson
15016dba8b47SRichard Henderson #define MMX_B(n) _b_MMXReg[n]
15026dba8b47SRichard Henderson #define MMX_W(n) _w_MMXReg[n]
15036dba8b47SRichard Henderson #define MMX_L(n) _l_MMXReg[n]
15046dba8b47SRichard Henderson #define MMX_S(n) _s_MMXReg[n]
15056dba8b47SRichard Henderson #endif
1506fcf5ef2aSThomas Huth #define MMX_Q(n) _q_MMXReg[n]
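/*
 * Illustrative use (not a specific QEMU code path): the accessors above hide
 * the host byte order, so index 0 always names the architecturally lowest
 * element, e.g.:
 *
 *     uint32_t lo = env->xmm_regs[0].ZMM_L(0);    (low 32 bits of XMM0)
 */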
1507fcf5ef2aSThomas Huth
1508fcf5ef2aSThomas Huth typedef union {
1509fcf5ef2aSThomas Huth floatx80 d __attribute__((aligned(16)));
15106dba8b47SRichard Henderson MMXReg mmx;
15116dba8b47SRichard Henderson } FPReg;
1512fcf5ef2aSThomas Huth
1513fcf5ef2aSThomas Huth typedef struct {
15146dba8b47SRichard Henderson uint64_t base;
15156dba8b47SRichard Henderson uint64_t mask;
1516fcf5ef2aSThomas Huth } MTRRVar;
1517fcf5ef2aSThomas Huth
1518fcf5ef2aSThomas Huth #define CPU_NB_REGS64 16
1519fcf5ef2aSThomas Huth #define CPU_NB_REGS32 8
1520fcf5ef2aSThomas Huth
1521fcf5ef2aSThomas Huth #ifdef TARGET_X86_64
1522fcf5ef2aSThomas Huth #define CPU_NB_REGS CPU_NB_REGS64
1523fcf5ef2aSThomas Huth #else
1524fcf5ef2aSThomas Huth #define CPU_NB_REGS CPU_NB_REGS32
1525fcf5ef2aSThomas Huth #endif
1526fcf5ef2aSThomas Huth
1527fcf5ef2aSThomas Huth #define MAX_FIXED_COUNTERS 3
1528fcf5ef2aSThomas Huth #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
1529fcf5ef2aSThomas Huth
1530fcf5ef2aSThomas Huth #define TARGET_INSN_START_EXTRA_WORDS 1
1531fcf5ef2aSThomas Huth
1532fcf5ef2aSThomas Huth #define NB_OPMASK_REGS 8
1533fcf5ef2aSThomas Huth
1534fcf5ef2aSThomas Huth /* A CPU can't have APIC ID 0xFFFFFFFF; that value is used to indicate
1535fcf5ef2aSThomas Huth * that the APIC ID hasn't been set yet
1536fcf5ef2aSThomas Huth */
1537fcf5ef2aSThomas Huth #define UNASSIGNED_APIC_ID 0xFFFFFFFF
1538fcf5ef2aSThomas Huth
1539fcf5ef2aSThomas Huth typedef struct X86LegacyXSaveArea {
1540fcf5ef2aSThomas Huth uint16_t fcw;
1541fcf5ef2aSThomas Huth uint16_t fsw;
1542fcf5ef2aSThomas Huth uint8_t ftw;
1543fcf5ef2aSThomas Huth uint8_t reserved;
1544fcf5ef2aSThomas Huth uint16_t fpop;
1545fcf5ef2aSThomas Huth union {
1546fcf5ef2aSThomas Huth struct {
1547fcf5ef2aSThomas Huth uint64_t fpip;
1548fcf5ef2aSThomas Huth uint64_t fpdp;
1549fcf5ef2aSThomas Huth };
1550fcf5ef2aSThomas Huth struct {
1551fcf5ef2aSThomas Huth uint32_t fip;
1552fcf5ef2aSThomas Huth uint32_t fcs;
1553fcf5ef2aSThomas Huth uint32_t foo;
1554fcf5ef2aSThomas Huth uint32_t fos;
1555fcf5ef2aSThomas Huth };
1556fcf5ef2aSThomas Huth };
1557fcf5ef2aSThomas Huth uint32_t mxcsr;
1558fcf5ef2aSThomas Huth uint32_t mxcsr_mask;
1559fcf5ef2aSThomas Huth FPReg fpregs[8];
15601f16764fSJing Liu uint8_t xmm_regs[16][16];
15611f16764fSJing Liu uint32_t hw_reserved[12];
15621f16764fSJing Liu uint32_t sw_reserved[12];
15631f16764fSJing Liu } X86LegacyXSaveArea;
15641f16764fSJing Liu
15651f16764fSJing Liu QEMU_BUILD_BUG_ON(sizeof(X86LegacyXSaveArea) != 512);
15661f16764fSJing Liu
15671f16764fSJing Liu typedef struct X86XSaveHeader {
15681f16764fSJing Liu uint64_t xstate_bv;
15691f16764fSJing Liu uint64_t xcomp_bv;
157010f0abcbSYang Weijiang uint64_t reserve0;
157110f0abcbSYang Weijiang uint8_t reserved[40];
157210f0abcbSYang Weijiang } X86XSaveHeader;
157310f0abcbSYang Weijiang
157410f0abcbSYang Weijiang /* Ext. save area 2: AVX State */
157510f0abcbSYang Weijiang typedef struct XSaveAVX {
157610f0abcbSYang Weijiang uint8_t ymmh[16][16];
157710f0abcbSYang Weijiang } XSaveAVX;
157810f0abcbSYang Weijiang
157910f0abcbSYang Weijiang /* Ext. save area 3: BNDREG */
158010f0abcbSYang Weijiang typedef struct XSaveBNDREG {
158110f0abcbSYang Weijiang BNDReg bnd_regs[4];
158210f0abcbSYang Weijiang } XSaveBNDREG;
158310f0abcbSYang Weijiang
158410f0abcbSYang Weijiang /* Ext. save area 4: BNDCSR */
158510f0abcbSYang Weijiang typedef union XSaveBNDCSR {
158610f0abcbSYang Weijiang BNDCSReg bndcsr;
158710f0abcbSYang Weijiang uint8_t data[64];
1588fcf5ef2aSThomas Huth } XSaveBNDCSR;
1589fcf5ef2aSThomas Huth
1590fcf5ef2aSThomas Huth /* Ext. save area 5: Opmask */
1591fcf5ef2aSThomas Huth typedef struct XSaveOpmask {
1592fcf5ef2aSThomas Huth uint64_t opmask_regs[NB_OPMASK_REGS];
1593fcf5ef2aSThomas Huth } XSaveOpmask;
1594fcf5ef2aSThomas Huth
15951f16764fSJing Liu /* Ext. save area 6: ZMM_Hi256 */
15961f16764fSJing Liu typedef struct XSaveZMM_Hi256 {
159710f0abcbSYang Weijiang uint8_t zmm_hi256[16][32];
1598fcf5ef2aSThomas Huth } XSaveZMM_Hi256;
15995aa10ab1SDavid Edmondson
16005aa10ab1SDavid Edmondson /* Ext. save area 7: Hi16_ZMM */
16015aa10ab1SDavid Edmondson typedef struct XSaveHi16_ZMM {
1602131266b7SJing Liu uint8_t hi16_zmm[16][64];
16035aa10ab1SDavid Edmondson } XSaveHi16_ZMM;
16045aa10ab1SDavid Edmondson
16051f16764fSJing Liu /* Ext. save area 9: PKRU state */
16065aa10ab1SDavid Edmondson typedef struct XSavePKRU {
1607fea45008SDavid Edmondson uint32_t pkru;
16085aa10ab1SDavid Edmondson uint32_t padding;
1609fcf5ef2aSThomas Huth } XSavePKRU;
1610fcf5ef2aSThomas Huth
1611fcf5ef2aSThomas Huth /* Ext. save area 17: AMX XTILECFG state */
1612fcf5ef2aSThomas Huth typedef struct XSaveXTILECFG {
1613fcf5ef2aSThomas Huth uint8_t xtilecfg[64];
16147e3482f8SEduardo Habkost } XSaveXTILECFG;
16157e3482f8SEduardo Habkost
16167e3482f8SEduardo Habkost /* Ext. save area 18: AMX XTILEDATA state */
16175f00335aSEduardo Habkost typedef struct XSaveXTILEDATA {
16185f00335aSEduardo Habkost uint8_t xtiledata[8][1024];
16197e3482f8SEduardo Habkost } XSaveXTILEDATA;
16207e3482f8SEduardo Habkost
16217e3482f8SEduardo Habkost typedef struct {
16227e3482f8SEduardo Habkost uint64_t from;
16237e3482f8SEduardo Habkost uint64_t to;
16247e3482f8SEduardo Habkost uint64_t info;
16257e3482f8SEduardo Habkost } LBREntry;
16267e3482f8SEduardo Habkost
16277e3482f8SEduardo Habkost #define ARCH_LBR_NR_ENTRIES 32
16287e3482f8SEduardo Habkost
16297e3482f8SEduardo Habkost /* Ext. save area 19: Supervisor mode Arch LBR state */
16307e3482f8SEduardo Habkost typedef struct XSavesArchLBR {
16317e3482f8SEduardo Habkost uint64_t lbr_ctl;
16327e3482f8SEduardo Habkost uint64_t lbr_depth;
16337e3482f8SEduardo Habkost uint64_t ler_from;
16347e3482f8SEduardo Habkost uint64_t ler_to;
16357e3482f8SEduardo Habkost uint64_t ler_info;
16367e3482f8SEduardo Habkost LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
16377e3482f8SEduardo Habkost } XSavesArchLBR;
16387e3482f8SEduardo Habkost
16397e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
16407e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
16417e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
16427e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
16437e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
16447e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
16457e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
16467e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40);
16477e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000);
16487e3482f8SEduardo Habkost QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328);
16497e3482f8SEduardo Habkost
16507e3482f8SEduardo Habkost typedef struct ExtSaveArea {
16517e3482f8SEduardo Habkost uint32_t feature, bits;
16527e3482f8SEduardo Habkost uint32_t offset, size;
16537e3482f8SEduardo Habkost uint32_t ecx;
16547e3482f8SEduardo Habkost } ExtSaveArea;
16557e3482f8SEduardo Habkost
16567e3482f8SEduardo Habkost #define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1)
16577e3482f8SEduardo Habkost
16587e3482f8SEduardo Habkost extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
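/*
 * Hedged sketch (not an existing helper): the table above is indexed by the
 * XSTATE_*_BIT numbers defined earlier in this header, so the offset and
 * size of a component in the non-compacted XSAVE image can be looked up as:
 *
 *     const ExtSaveArea *esa = &x86_ext_save_areas[XSTATE_YMM_BIT];
 *     (esa->offset and esa->size then describe the AVX component)
 */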
16597e3482f8SEduardo Habkost
16607e3482f8SEduardo Habkost typedef enum TPRAccess {
16617e3482f8SEduardo Habkost TPR_ACCESS_READ,
16627e3482f8SEduardo Habkost TPR_ACCESS_WRITE,
16639fcba76aSZhao Liu } TPRAccess;
16649fcba76aSZhao Liu
16659fcba76aSZhao Liu /* Cache information data structures: */
16669fcba76aSZhao Liu
16679fcba76aSZhao Liu enum CacheType {
16689fcba76aSZhao Liu DATA_CACHE,
16699fcba76aSZhao Liu INSTRUCTION_CACHE,
16707e3482f8SEduardo Habkost UNIFIED_CACHE
16717e3482f8SEduardo Habkost };
16727e3482f8SEduardo Habkost
16736aaeb054SBabu Moger typedef struct CPUCacheInfo {
1674a9f27ea9SEduardo Habkost enum CacheType type;
1675a9f27ea9SEduardo Habkost uint8_t level;
1676a9f27ea9SEduardo Habkost /* Size in bytes */
1677a9f27ea9SEduardo Habkost uint32_t size;
16786aaeb054SBabu Moger /* Line size, in bytes */
16797e3482f8SEduardo Habkost uint16_t line_size;
1680577f02b8SRoman Bolshakov /*
1681577f02b8SRoman Bolshakov * Associativity.
1682577f02b8SRoman Bolshakov * Note: representation of fully-associative caches is not implemented
1683577f02b8SRoman Bolshakov */
1684577f02b8SRoman Bolshakov uint8_t associativity;
16851ea4a06aSPhilippe Mathieu-Daudé /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
1686fcf5ef2aSThomas Huth uint8_t partitions;
1687fcf5ef2aSThomas Huth /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
1688fcf5ef2aSThomas Huth uint32_t sets;
1689fcf5ef2aSThomas Huth /*
1690fcf5ef2aSThomas Huth * Lines per tag.
1691fcf5ef2aSThomas Huth * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
1692fcf5ef2aSThomas Huth * (Is this a synonym for @partitions?)
1693fcf5ef2aSThomas Huth */
1694fcf5ef2aSThomas Huth uint8_t lines_per_tag;
1695fcf5ef2aSThomas Huth
1696fcf5ef2aSThomas Huth /* Self-initializing cache */
1697fcf5ef2aSThomas Huth bool self_init;
1698fcf5ef2aSThomas Huth /*
1699fcf5ef2aSThomas Huth * WBINVD/INVD is not guaranteed to act upon lower level caches of
1700fcf5ef2aSThomas Huth * non-originating threads sharing this cache.
1701fcf5ef2aSThomas Huth * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
1702fcf5ef2aSThomas Huth */
1703fcf5ef2aSThomas Huth bool no_invd_sharing;
1704fcf5ef2aSThomas Huth /*
1705fcf5ef2aSThomas Huth * Cache is inclusive of lower cache levels.
1706fcf5ef2aSThomas Huth * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
1707fcf5ef2aSThomas Huth */
1708fcf5ef2aSThomas Huth bool inclusive;
1709fcf5ef2aSThomas Huth /*
1710fcf5ef2aSThomas Huth * A complex function is used to index the cache, potentially using all
17118f515d38SMaxim Levitsky * address bits. CPUID[4].EDX[bit 2].
17128f515d38SMaxim Levitsky */
17138f515d38SMaxim Levitsky bool complex_indexing;
1714fcf5ef2aSThomas Huth
1715fcf5ef2aSThomas Huth /*
1716fcf5ef2aSThomas Huth * Cache Topology. The level that cache is shared in.
1717fcf5ef2aSThomas Huth * Used to encode CPUID[4].EAX[bits 25:14] or
1718fcf5ef2aSThomas Huth * CPUID[0x8000001D].EAX[bits 25:14].
1719fcf5ef2aSThomas Huth */
1720fcf5ef2aSThomas Huth CpuTopologyLevel share_level;
1721fcf5ef2aSThomas Huth } CPUCacheInfo;
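/*
 * The geometry fields follow the CPUID leaf 4 / leaf 0x8000001D convention:
 * size = line_size * associativity * partitions * sets. For example, a
 * 32 KiB 8-way L1 data cache with 64-byte lines and one partition has
 * 32768 / (64 * 8 * 1) = 64 sets.
 */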
1722fcf5ef2aSThomas Huth
1723fcf5ef2aSThomas Huth
1724fcf5ef2aSThomas Huth typedef struct CPUCaches {
1725fcf5ef2aSThomas Huth CPUCacheInfo *l1d_cache;
1726fcf5ef2aSThomas Huth CPUCacheInfo *l1i_cache;
1727fcf5ef2aSThomas Huth CPUCacheInfo *l2_cache;
1728fcf5ef2aSThomas Huth CPUCacheInfo *l3_cache;
1729fcf5ef2aSThomas Huth } CPUCaches;
1730fcf5ef2aSThomas Huth
1731fcf5ef2aSThomas Huth typedef struct HVFX86LazyFlags {
173284abdd7dSZiqiao Kong target_ulong result;
173384abdd7dSZiqiao Kong target_ulong auxbits;
1734fcf5ef2aSThomas Huth } HVFX86LazyFlags;
1735fcf5ef2aSThomas Huth
1736fcf5ef2aSThomas Huth typedef struct CPUArchState {
1737fcf5ef2aSThomas Huth /* standard registers */
1738fcf5ef2aSThomas Huth target_ulong regs[CPU_NB_REGS];
1739fcf5ef2aSThomas Huth target_ulong eip;
1740fcf5ef2aSThomas Huth target_ulong eflags; /* eflags register. During CPU emulation, CC
1741fcf5ef2aSThomas Huth flags and DF are set to zero because they are
1742fcf5ef2aSThomas Huth stored elsewhere */
1743fcf5ef2aSThomas Huth
174475f107a8SRichard Henderson /* emulator internal eflags handling */
174575f107a8SRichard Henderson target_ulong cc_dst;
1746fcf5ef2aSThomas Huth target_ulong cc_src;
1747fcf5ef2aSThomas Huth target_ulong cc_src2;
1748fcf5ef2aSThomas Huth uint32_t cc_op;
1749e56dd3c7SJing Liu int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
1750e56dd3c7SJing Liu uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
1751e56dd3c7SJing Liu are known at translation time. */
1752e56dd3c7SJing Liu uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
1753fcf5ef2aSThomas Huth
1754fcf5ef2aSThomas Huth /* segments */
1755fcf5ef2aSThomas Huth SegmentCache segs[6]; /* selector values */
1756fcf5ef2aSThomas Huth SegmentCache ldt;
1757fcf5ef2aSThomas Huth SegmentCache tr;
1758fcf5ef2aSThomas Huth SegmentCache gdt; /* only base and limit are used */
1759fcf5ef2aSThomas Huth SegmentCache idt; /* only base and limit are used */
1760fcf5ef2aSThomas Huth
1761fcf5ef2aSThomas Huth target_ulong cr[5]; /* NOTE: cr1 is unused */
1762fcf5ef2aSThomas Huth
1763fcf5ef2aSThomas Huth bool pdptrs_valid;
1764fcf5ef2aSThomas Huth uint64_t pdptrs[4];
1765fcf5ef2aSThomas Huth int32_t a20_mask;
1766fcf5ef2aSThomas Huth
17674ebd98ebSXin Li BNDReg bnd_regs[4];
17684ebd98ebSXin Li BNDCSReg bndcs_regs;
17694ebd98ebSXin Li uint64_t msr_bndcfgs;
17704ebd98ebSXin Li uint64_t efer;
17714ebd98ebSXin Li
17724ebd98ebSXin Li /* Beginning of state preserved by INIT (dummy marker). */
17734ebd98ebSXin Li struct {} start_init_save;
17744ebd98ebSXin Li
17754ebd98ebSXin Li /* FPU state */
17764ebd98ebSXin Li unsigned int fpstt; /* top of stack index */
17774ebd98ebSXin Li uint16_t fpus;
1778fcf5ef2aSThomas Huth uint16_t fpuc;
1779fcf5ef2aSThomas Huth uint8_t fptags[8]; /* 0 = valid, 1 = empty */
1780fcf5ef2aSThomas Huth FPReg fpregs[8];
1781fcf5ef2aSThomas Huth /* KVM-only so far */
1782fcf5ef2aSThomas Huth uint16_t fpop;
1783fcf5ef2aSThomas Huth uint16_t fpcs;
1784fcf5ef2aSThomas Huth uint16_t fpds;
1785fcf5ef2aSThomas Huth uint64_t fpip;
1786fcf5ef2aSThomas Huth uint64_t fpdp;
1787fcf5ef2aSThomas Huth
1788fcf5ef2aSThomas Huth /* emulator internal variables */
1789db888065SSean Christopherson float_status fp_status;
1790fcf5ef2aSThomas Huth floatx80 ft0;
1791fcf5ef2aSThomas Huth
1792fcf5ef2aSThomas Huth float_status mmx_status; /* for 3DNow! float ops */
1793fcf5ef2aSThomas Huth float_status sse_status;
1794fcf5ef2aSThomas Huth uint32_t mxcsr;
1795fcf5ef2aSThomas Huth ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16);
1796fcf5ef2aSThomas Huth ZMMReg xmm_t0 QEMU_ALIGNED(16);
1797fcf5ef2aSThomas Huth MMXReg mmx_t0;
1798fcf5ef2aSThomas Huth
1799fcf5ef2aSThomas Huth uint64_t opmask_regs[NB_OPMASK_REGS];
1800fcf5ef2aSThomas Huth #ifdef TARGET_X86_64
1801e13713dbSLiran Alon uint8_t xtilecfg[64];
1802fcf5ef2aSThomas Huth uint8_t xtiledata[8192];
1803fcf5ef2aSThomas Huth #endif
1804e7e7bdabSPaolo Bonzini
18052a9758c5SPaolo Bonzini /* sysenter registers */
1806fcf5ef2aSThomas Huth uint32_t sysenter_cs;
1807a33a2cfeSPaolo Bonzini target_ulong sysenter_esp;
1808cabf9862SMaxim Levitsky target_ulong sysenter_eip;
1809cfeea0c0SKonrad Rzeszutek Wilk uint64_t star;
1810a33a2cfeSPaolo Bonzini
1811fcf5ef2aSThomas Huth uint64_t vm_hsave;
1812fcf5ef2aSThomas Huth
1813fcf5ef2aSThomas Huth #ifdef TARGET_X86_64
1814fcf5ef2aSThomas Huth target_ulong lstar;
1815fcf5ef2aSThomas Huth target_ulong cstar;
1816fcf5ef2aSThomas Huth target_ulong fmask;
1817fcf5ef2aSThomas Huth target_ulong kernelgsbase;
1818db5daafaSVitaly Kuznetsov
1819fcf5ef2aSThomas Huth /* FRED MSRs */
1820d645e132SMarcelo Tosatti uint64_t fred_rsp0;
1821fcf5ef2aSThomas Huth uint64_t fred_rsp1;
1822da1cc323SEvgeny Yakovlev uint64_t fred_rsp2;
1823fcf5ef2aSThomas Huth uint64_t fred_rsp3;
1824fcf5ef2aSThomas Huth uint64_t fred_stklvls;
1825fcf5ef2aSThomas Huth uint64_t fred_ssp1;
182673d24074SJon Doron uint64_t fred_ssp2;
182773d24074SJon Doron uint64_t fred_ssp3;
182873d24074SJon Doron uint64_t fred_config;
182973d24074SJon Doron #endif
183073d24074SJon Doron
183173d24074SJon Doron uint64_t tsc_adjust;
1832da1cc323SEvgeny Yakovlev uint64_t tsc_deadline;
1833da1cc323SEvgeny Yakovlev uint64_t tsc_aux;
1834da1cc323SEvgeny Yakovlev
18355e953812SRoman Kagan uint64_t xcr0;
1836fcf5ef2aSThomas Huth
1837fcf5ef2aSThomas Huth uint64_t mcg_status;
1838fcf5ef2aSThomas Huth uint64_t msr_ia32_misc_enable;
1839fcf5ef2aSThomas Huth uint64_t msr_ia32_feature_control;
18405e953812SRoman Kagan uint64_t msr_ia32_sgxlepubkeyhash[4];
18415e953812SRoman Kagan
18425e953812SRoman Kagan uint64_t msr_fixed_ctr_ctrl;
1843ba6a4fd9SVitaly Kuznetsov uint64_t msr_global_ctrl;
1844ba6a4fd9SVitaly Kuznetsov uint64_t msr_global_status;
1845ba6a4fd9SVitaly Kuznetsov uint64_t msr_global_ovf_ctrl;
1846fcf5ef2aSThomas Huth uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
1847b77146e9SChao Peng uint64_t msr_gp_counters[MAX_GP_COUNTERS];
1848b77146e9SChao Peng uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
1849b77146e9SChao Peng
1850b77146e9SChao Peng uint64_t pat;
1851b77146e9SChao Peng uint32_t smbase;
1852b77146e9SChao Peng uint64_t msr_smi_count;
1853b77146e9SChao Peng
1854cdec2b75SZeng Guang uint32_t pkru;
1855cdec2b75SZeng Guang uint32_t pkrs;
1856cdec2b75SZeng Guang uint32_t tsx_ctrl;
1857cdec2b75SZeng Guang
185812703d4eSYang Weijiang uint64_t spec_ctrl;
185912703d4eSYang Weijiang uint64_t amd_tsc_scale_msr;
186012703d4eSYang Weijiang uint64_t virt_ssbd;
186112703d4eSYang Weijiang
186212703d4eSYang Weijiang /* End of state preserved by INIT (dummy marker). */
1863fcf5ef2aSThomas Huth struct {} end_init_save;
1864fcf5ef2aSThomas Huth
1865fcf5ef2aSThomas Huth uint64_t system_time_msr;
1866fcf5ef2aSThomas Huth uint64_t wall_clock_msr;
1867fcf5ef2aSThomas Huth uint64_t steal_time_msr;
1868fcf5ef2aSThomas Huth uint64_t async_pf_en_msr;
1869fcf5ef2aSThomas Huth uint64_t async_pf_int_msr;
1870fcf5ef2aSThomas Huth uint64_t pv_eoi_en_msr;
1871fcf5ef2aSThomas Huth uint64_t poll_control_msr;
1872fcf5ef2aSThomas Huth
1873fcf5ef2aSThomas Huth /* Partition-wide HV MSRs, will be updated only on the first vcpu */
1874fcf5ef2aSThomas Huth uint64_t msr_hv_hypercall;
1875fcf5ef2aSThomas Huth uint64_t msr_hv_guest_os_id;
1876fcf5ef2aSThomas Huth uint64_t msr_hv_tsc;
1877fcf5ef2aSThomas Huth uint64_t msr_hv_syndbg_control;
1878fcf5ef2aSThomas Huth uint64_t msr_hv_syndbg_status;
1879fcf5ef2aSThomas Huth uint64_t msr_hv_syndbg_send_page;
1880fcf5ef2aSThomas Huth uint64_t msr_hv_syndbg_recv_page;
1881fcf5ef2aSThomas Huth uint64_t msr_hv_syndbg_pending_page;
1882fe441054SJan Kiszka uint64_t msr_hv_syndbg_options;
1883fe441054SJan Kiszka
1884fcf5ef2aSThomas Huth /* Per-VCPU HV MSRs */
1885e3126a5cSLara Lazier uint64_t msr_hv_vapic;
1886fcf5ef2aSThomas Huth uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
1887fcf5ef2aSThomas Huth uint64_t msr_hv_runtime;
1888fcf5ef2aSThomas Huth uint64_t msr_hv_synic_control;
1889fcf5ef2aSThomas Huth uint64_t msr_hv_synic_evt_page;
1890fcf5ef2aSThomas Huth uint64_t msr_hv_synic_msg_page;
1891fe441054SJan Kiszka uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
1892fe441054SJan Kiszka uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
18930418f908SAnthony Harivel uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
18940418f908SAnthony Harivel uint64_t msr_hv_reenlightenment_control;
18950418f908SAnthony Harivel uint64_t msr_hv_tsc_emulation_control;
18960418f908SAnthony Harivel uint64_t msr_hv_tsc_emulation_status;
18971f5c00cfSAlex Bennée
18981f5c00cfSAlex Bennée uint64_t msr_rtit_ctrl;
18991f5c00cfSAlex Bennée uint64_t msr_rtit_status;
1900e8b5fae5SRichard Henderson uint64_t msr_rtit_output_base;
1901fcf5ef2aSThomas Huth uint64_t msr_rtit_output_mask;
1902fcf5ef2aSThomas Huth uint64_t msr_rtit_cr3_match;
190380db491dSJing Liu uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
190480db491dSJing Liu
190580db491dSJing Liu /* Per-VCPU XFD MSRs */
190680db491dSJing Liu uint64_t msr_xfd;
1907fcf5ef2aSThomas Huth uint64_t msr_xfd_err;
1908fcf5ef2aSThomas Huth
1909fcf5ef2aSThomas Huth /* Per-VCPU Arch LBR MSRs */
1910fcf5ef2aSThomas Huth uint64_t msr_lbr_ctl;
1911fcf5ef2aSThomas Huth uint64_t msr_lbr_depth;
1912fcf5ef2aSThomas Huth LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
1913fcf5ef2aSThomas Huth
1914fcf5ef2aSThomas Huth /* AMD MSRC001_0015 Hardware Configuration */
1915fcf5ef2aSThomas Huth uint64_t msr_hwcr;
1916fcf5ef2aSThomas Huth
1917fcf5ef2aSThomas Huth /* exception/interrupt handling */
1918d4a606b3SEduardo Habkost int error_code;
1919d4a606b3SEduardo Habkost int exception_is_int;
1920fcf5ef2aSThomas Huth target_ulong exception_next_eip;
1921a9f27ea9SEduardo Habkost target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
1922a9f27ea9SEduardo Habkost union {
1923a9f27ea9SEduardo Habkost struct CPUBreakpoint *cpu_breakpoint[4];
1924a9f27ea9SEduardo Habkost struct CPUWatchpoint *cpu_watchpoint[4];
1925a9f27ea9SEduardo Habkost }; /* break/watchpoints for dr[0..3] */
1926fcf5ef2aSThomas Huth int old_exception; /* exception in flight */
1927fcf5ef2aSThomas Huth
1928fcf5ef2aSThomas Huth uint64_t vm_vmcb;
1929fcf5ef2aSThomas Huth uint64_t tsc_offset;
1930fcf5ef2aSThomas Huth uint64_t intercept;
1931fcf5ef2aSThomas Huth uint16_t intercept_cr_read;
1932fcf5ef2aSThomas Huth uint16_t intercept_cr_write;
1933fcf5ef2aSThomas Huth uint16_t intercept_dr_read;
1934fd13f23bSLiran Alon uint16_t intercept_dr_write;
1935fcf5ef2aSThomas Huth uint32_t intercept_exceptions;
1936fcf5ef2aSThomas Huth uint64_t nested_cr3;
1937fd13f23bSLiran Alon uint32_t nested_pg_mode;
1938fd13f23bSLiran Alon uint8_t v_tpr;
1939fcf5ef2aSThomas Huth uint32_t int_ctl;
1940fd13f23bSLiran Alon
1941fd13f23bSLiran Alon /* KVM states, automatically cleared on reset */
194212f89a39SChenyi Qiang uint8_t nmi_injected;
1943c97d6d2cSSergio Andres Gomez Del Real uint8_t nmi_pending;
1944fcf5ef2aSThomas Huth
1945fcf5ef2aSThomas Huth uintptr_t retaddr;
1946fcf5ef2aSThomas Huth
1947fcf5ef2aSThomas Huth /* RAPL MSR */
194873b994f6SLiran Alon uint64_t msr_rapl_power_unit;
19495286c366SPaolo Bonzini uint64_t msr_pkg_energy_status;
19505b8063c4SLiran Alon
19515b8063c4SLiran Alon /* Fields up to this point are cleared by a CPU reset */
1952c0198c5fSDavid Edmondson struct {} end_reset_fields;
19535b8063c4SLiran Alon
1954ebbfef2fSLiran Alon /* Fields after this point are preserved across CPU reset. */
1955ebbfef2fSLiran Alon
195627d4075dSDavid Woodhouse /* processor features (e.g. for CPUID insn) */
195727d4075dSDavid Woodhouse /* Minimum cpuid leaf 7 value */
1958c345104cSJoao Martins uint32_t cpuid_level_func7;
1959c345104cSJoao Martins /* Actual cpuid leaf 7 value */
1960f0689302SJoao Martins uint32_t cpuid_min_level_func7;
19615092db87SJoao Martins /* Minimum level/xlevel/xlevel2, based on CPU model + features */
1962105b47fdSAnkur Arora uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
1963ddf0fd9aSDavid Woodhouse /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
1964c723d4c1SDavid Woodhouse uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
1965c723d4c1SDavid Woodhouse /* Actual level/xlevel/xlevel2 value: */
1966b746a779SJoao Martins uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
1967b746a779SJoao Martins uint32_t cpuid_vendor1;
1968b746a779SJoao Martins uint32_t cpuid_vendor2;
1969b746a779SJoao Martins uint32_t cpuid_vendor3;
1970ebbfef2fSLiran Alon uint32_t cpuid_version;
1971c97d6d2cSSergio Andres Gomez Del Real FeatureWordArray features;
1972577f02b8SRoman Bolshakov /* AVX10 version */
1973fe76b09cSRoman Bolshakov uint8_t avx10_version;
1974c97d6d2cSSergio Andres Gomez Del Real /* Features that were explicitly enabled/disabled */
1975fcf5ef2aSThomas Huth FeatureWordArray user_features;
1976fcf5ef2aSThomas Huth uint32_t cpuid_model[12];
1977fcf5ef2aSThomas Huth /* Cache information for CPUID. When legacy-cache=on, the cache data
1978fcf5ef2aSThomas Huth * on each CPUID leaf will be different, because we keep compatibility
1979fcf5ef2aSThomas Huth * with old QEMU versions.
1980fcf5ef2aSThomas Huth */
1981fcf5ef2aSThomas Huth CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
1982fcf5ef2aSThomas Huth
1983fcf5ef2aSThomas Huth /* MTRRs */
1984fcf5ef2aSThomas Huth uint64_t mtrr_fixed[11];
1985fcf5ef2aSThomas Huth uint64_t mtrr_deftype;
1986fcf5ef2aSThomas Huth MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
1987fcf5ef2aSThomas Huth
198865087997STao Xu /* For KVM */
1989fcf5ef2aSThomas Huth uint32_t mp_state;
1990fcf5ef2aSThomas Huth int32_t exception_nr;
1991c26ae610SLike Xu int32_t interrupt_injected;
1992aa1878fbSZhao Liu uint8_t soft_interrupt;
1993c26ae610SLike Xu uint8_t exception_pending;
19946ddeb0ecSZhao Liu uint8_t exception_injected;
199581c392abSZhao Liu uint8_t has_error_code;
199681c392abSZhao Liu uint8_t exception_has_payload;
199781c392abSZhao Liu uint64_t exception_payload;
19986ddeb0ecSZhao Liu uint8_t triple_fault_pending;
19996ddeb0ecSZhao Liu uint32_t ins_len;
2000fcf5ef2aSThomas Huth uint32_t sipi_vector;
2001fcf5ef2aSThomas Huth bool tsc_valid;
2002fcf5ef2aSThomas Huth int64_t tsc_khz;
2003fcf5ef2aSThomas Huth int64_t user_tsc_khz; /* for sanity check only */
2004fcf5ef2aSThomas Huth uint64_t apic_bus_freq;
2005fcf5ef2aSThomas Huth uint64_t tsc;
2006fcf5ef2aSThomas Huth #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2007fcf5ef2aSThomas Huth void *xsave_buf;
2008fcf5ef2aSThomas Huth uint32_t xsave_buf_len;
2009fcf5ef2aSThomas Huth #endif
2010fcf5ef2aSThomas Huth #if defined(CONFIG_KVM)
2011fcf5ef2aSThomas Huth struct kvm_nested_state *nested_state;
2012fcf5ef2aSThomas Huth MemoryRegion *xen_vcpu_info_mr;
2013b36e239eSPhilippe Mathieu-Daudé void *xen_vcpu_info_hva;
2014fcf5ef2aSThomas Huth uint64_t xen_vcpu_info_gpa;
2015fcf5ef2aSThomas Huth uint64_t xen_vcpu_info_default_gpa;
2016fcf5ef2aSThomas Huth uint64_t xen_vcpu_time_info_gpa;
20172a693142SPan Nengyuan uint64_t xen_vcpu_runstate_gpa;
2018fcf5ef2aSThomas Huth uint8_t xen_vcpu_callback_vector;
20194e45aff3SPaolo Bonzini bool xen_callback_asserted;
20204e45aff3SPaolo Bonzini uint16_t xen_virq[XEN_NR_VIRQS];
20214f2beda4SEduardo Habkost uint64_t xen_singleshot_timer_ns;
202208856771SVitaly Kuznetsov QEMUTimer *xen_singleshot_timer;
20239b4cf107SRoman Kagan uint64_t xen_periodic_timer_period;
20242d384d7cSVitaly Kuznetsov QEMUTimer *xen_periodic_timer;
2025e48ddcc6SVitaly Kuznetsov QemuMutex xen_timers_lock;
202630d6ff66SVitaly Kuznetsov #endif
202708856771SVitaly Kuznetsov #if defined(CONFIG_HVF)
2028735db465SVitaly Kuznetsov HVFX86LazyFlags hvf_lflags;
202923eb5d03SVitaly Kuznetsov void *hvf_mmio_buf;
203070367f09SVitaly Kuznetsov #endif
2031af7228b8SVitaly Kuznetsov
2032af7228b8SVitaly Kuznetsov uint64_t mcg_cap;
2033af7228b8SVitaly Kuznetsov uint64_t mcg_ctl;
2034af7228b8SVitaly Kuznetsov uint64_t mcg_ext_ctl;
2035af7228b8SVitaly Kuznetsov uint64_t mce_banks[MCE_BANKS_DEF*4];
2036af7228b8SVitaly Kuznetsov uint64_t xstate_bv;
20372d384d7cSVitaly Kuznetsov
2038fcf5ef2aSThomas Huth /* vmstate */
2039fcf5ef2aSThomas Huth uint16_t fpus_vmstate;
2040dac1deaeSEduardo Habkost uint16_t fptag_vmstate;
2041dac1deaeSEduardo Habkost uint16_t fpregs_format_vmstate;
2042dac1deaeSEduardo Habkost
2043dac1deaeSEduardo Habkost uint64_t xss;
2044dac1deaeSEduardo Habkost uint32_t umwait;
2045dac1deaeSEduardo Habkost
2046fcf5ef2aSThomas Huth TPRAccess tpr_access_type;
20471ce36bfeSDaniel P. Berrange
2048fcf5ef2aSThomas Huth /* Number of dies within this CPU package. */
2049990e0be2SPaolo Bonzini unsigned nr_dies;
205044bd8e53SEduardo Habkost
2051fcf5ef2aSThomas Huth /* Number of modules within one die. */
2052fcf5ef2aSThomas Huth unsigned nr_modules;
20539954a158SPhil Dennis-Jordan
20549954a158SPhil Dennis-Jordan /* Bitmap of available CPU topology levels for this CPU. */
20559954a158SPhil Dennis-Jordan DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX);
20569954a158SPhil Dennis-Jordan } CPUX86State;
2057fcf5ef2aSThomas Huth
2058fcf5ef2aSThomas Huth struct kvm_msrs;
2059fcf5ef2aSThomas Huth
20602266d443SMichael S. Tsirkin /**
20612266d443SMichael S. Tsirkin * X86CPU:
20622266d443SMichael S. Tsirkin * @env: #CPUX86State
20632266d443SMichael S. Tsirkin * @migratable: If set, only migratable flags will be accepted when "enforce"
20642266d443SMichael S. Tsirkin * mode is used, and only migratable flags will be included in the "host"
20652266d443SMichael S. Tsirkin * CPU model.
20662266d443SMichael S. Tsirkin *
20672266d443SMichael S. Tsirkin * An x86 CPU.
20682266d443SMichael S. Tsirkin */
2069fcf5ef2aSThomas Huth struct ArchCPU {
2070f69ecddbSWei Yang CPUState parent_obj;
2071fcf5ef2aSThomas Huth
2072fcf5ef2aSThomas Huth CPUX86State env;
2073fcf5ef2aSThomas Huth VMChangeStateEntry *vmsentry;
2074fcf5ef2aSThomas Huth
2075fcf5ef2aSThomas Huth uint64_t ucode_rev;
2076fcf5ef2aSThomas Huth
2077fcf5ef2aSThomas Huth uint32_t hyperv_spinlock_attempts;
2078fcf5ef2aSThomas Huth char *hyperv_vendor;
2079f06d8a18SYang Weijiang bool hyperv_synic_kvm_only;
2080f06d8a18SYang Weijiang uint64_t hyperv_features;
2081f06d8a18SYang Weijiang bool hyperv_passthrough;
2082f06d8a18SYang Weijiang OnOffAuto hyperv_no_nonarch_cs;
2083f06d8a18SYang Weijiang uint32_t hyperv_vendor_id[3];
2084f06d8a18SYang Weijiang uint32_t hyperv_interface_id[4];
2085f06d8a18SYang Weijiang uint32_t hyperv_limits[3];
2086f06d8a18SYang Weijiang bool hyperv_enforce_cpuid;
2087f06d8a18SYang Weijiang uint32_t hyperv_ver_id_build;
2088fcf5ef2aSThomas Huth uint16_t hyperv_ver_id_major;
2089fcf5ef2aSThomas Huth uint16_t hyperv_ver_id_minor;
2090fcf5ef2aSThomas Huth uint32_t hyperv_ver_id_sp;
2091fcf5ef2aSThomas Huth uint8_t hyperv_ver_id_sb;
2092fcf5ef2aSThomas Huth uint32_t hyperv_ver_id_sn;
2093fcf5ef2aSThomas Huth
2094fcf5ef2aSThomas Huth bool check_cpuid;
2095fcf5ef2aSThomas Huth bool enforce_cpuid;
2096fcf5ef2aSThomas Huth /*
2097fcf5ef2aSThomas Huth * Force features to be enabled even if the host doesn't support them.
2098fcf5ef2aSThomas Huth * This is dangerous and should be done only for testing CPUID
2099fcf5ef2aSThomas Huth * compatibility.
2100ab8f992eSBabu Moger */
2101f602eb92SZhao Liu bool force_features;
2102f602eb92SZhao Liu bool expose_kvm;
2103f602eb92SZhao Liu bool expose_tcg;
2104f602eb92SZhao Liu bool migratable;
2105f602eb92SZhao Liu bool migrate_smi_count;
2106ab8f992eSBabu Moger bool max_features; /* Enable all supported features automatically */
2107ab8f992eSBabu Moger uint32_t apic_id;
2108ab8f992eSBabu Moger
2109ab8f992eSBabu Moger /* Enables publishing of TSC increment and Local APIC bus frequencies to
2110b776569aSBabu Moger * the guest OS in CPUID leaf 0x40000010, the same way that VMware does. */
2111b776569aSBabu Moger bool vmware_cpuid_freq;
2112b776569aSBabu Moger
2113b776569aSBabu Moger /* if true, the CPUID code directly forwards host cache leaves to the guest */
2114b776569aSBabu Moger bool cache_info_passthrough;
2115b776569aSBabu Moger
2116fcf5ef2aSThomas Huth /* if true, the CPUID code directly forwards
2117fcf5ef2aSThomas Huth * host monitor/mwait leaves to the guest */
2118fcf5ef2aSThomas Huth struct {
2119fcf5ef2aSThomas Huth uint32_t eax;
2120fcf5ef2aSThomas Huth uint32_t ebx;
2121fcf5ef2aSThomas Huth uint32_t ecx;
2122a7a0da84SMichael Roth uint32_t edx;
2123a7a0da84SMichael Roth } mwait;
2124a7a0da84SMichael Roth
212529a51b2bSPaolo Bonzini /* Features that were filtered out because of missing host capabilities */
212629a51b2bSPaolo Bonzini FeatureWordArray filtered_features;
212729a51b2bSPaolo Bonzini
2128f24c3a79SLuwei Kang /* Enable PMU CPUID bits. This can't be enabled by default yet because
2129f24c3a79SLuwei Kang * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
2130f24c3a79SLuwei Kang * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
2131fcf5ef2aSThomas Huth * capabilities) directly to the guest.
2132fcf5ef2aSThomas Huth */
2133fcf5ef2aSThomas Huth bool enable_pmu;
2134fcf5ef2aSThomas Huth
2135fcf5ef2aSThomas Huth /*
2136fcf5ef2aSThomas Huth * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR.
2137258fe08bSEduardo Habkost * This can't be initialized with a default because it doesn't have
2138258fe08bSEduardo Habkost * stable ABI support yet. It is only allowed to pass all LBR_FMT bits
2139258fe08bSEduardo Habkost * returned by kvm_arch_get_supported_msr_feature()(which depends on both
2140988f7b8bSVitaly Kuznetsov * host CPU and kernel capabilities) to the guest.
2141988f7b8bSVitaly Kuznetsov */
2142988f7b8bSVitaly Kuznetsov uint64_t lbr_fmt;
2143fcf5ef2aSThomas Huth
2144fcf5ef2aSThomas Huth /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
2145fcf5ef2aSThomas Huth * disabled by default to avoid breaking migration between QEMU instances with
2146513ba32dSGerd Hoffmann * different LMCE configurations.
2147513ba32dSGerd Hoffmann */
2148513ba32dSGerd Hoffmann bool enable_lmce;
2149513ba32dSGerd Hoffmann
2150513ba32dSGerd Hoffmann /* Compatibility bits for old machine types.
2151513ba32dSGerd Hoffmann * If true, present a virtual L3 cache to the VM; the vCPUs in the same
2152513ba32dSGerd Hoffmann * virtual socket share a virtual L3 cache.
2153513ba32dSGerd Hoffmann */
2154fcf5ef2aSThomas Huth bool enable_l3_cache;
2155fcf5ef2aSThomas Huth
2156fcf5ef2aSThomas Huth /* Compatibility bits for old machine types.
2157fcf5ef2aSThomas Huth * If true, present the L1 cache as per-thread, not per-core.
2158fcf5ef2aSThomas Huth */
2159fcf5ef2aSThomas Huth bool l1_cache_per_core;
2160fcf5ef2aSThomas Huth
2161fcf5ef2aSThomas Huth /* Compatibility bits for old machine types.
216215f8b142SIgor Mammedov * If true, present the old cache topology information.
2163fcf5ef2aSThomas Huth */
2164176d2cdaSLike Xu bool legacy_cache;
216558820834SZhao Liu
2166fcf5ef2aSThomas Huth /* Compatibility bits for old machine types.
2167fcf5ef2aSThomas Huth * If true, decode CPUID function 0x8000001E ECX to support multiple
21686c69dfb6SGonglei * nodes per processor.
21696c69dfb6SGonglei */
2170f66b8a83SJoao Martins bool legacy_multi_node;
2171f66b8a83SJoao Martins
2172fcf5ef2aSThomas Huth /* Compatibility bits for old machine types: */
2173fcf5ef2aSThomas Huth bool enable_cpuid_0xb;
21749348028eSPhilippe Mathieu-Daudé
21759348028eSPhilippe Mathieu-Daudé /* Enable auto level-increase for all CPUID leaves */
21769348028eSPhilippe Mathieu-Daudé bool full_cpuid_auto_level;
21779348028eSPhilippe Mathieu-Daudé
21789348028eSPhilippe Mathieu-Daudé /* Only advertise CPUID leaves defined by the vendor */
21799348028eSPhilippe Mathieu-Daudé bool vendor_cpuid_only;
21809348028eSPhilippe Mathieu-Daudé
21819348028eSPhilippe Mathieu-Daudé /* Only advertise TOPOEXT features that AMD defines */
21829348028eSPhilippe Mathieu-Daudé bool amd_topoext_features_only;
21839348028eSPhilippe Mathieu-Daudé
21849348028eSPhilippe Mathieu-Daudé /* Enable auto level-increase for the Intel Processor Trace leaf */
21859348028eSPhilippe Mathieu-Daudé bool intel_pt_auto_level;
21869348028eSPhilippe Mathieu-Daudé
21879348028eSPhilippe Mathieu-Daudé /* if true, fill the top bits of the MTRR_PHYSMASKn variable-range mask registers */
21889348028eSPhilippe Mathieu-Daudé bool fill_mtrr_mask;
21899348028eSPhilippe Mathieu-Daudé
21909348028eSPhilippe Mathieu-Daudé /* if true, override the phys_bits value with a value read from the host */
21919348028eSPhilippe Mathieu-Daudé bool host_phys_bits;
21929348028eSPhilippe Mathieu-Daudé
21939348028eSPhilippe Mathieu-Daudé /* if set, limit the maximum value of phys_bits when host_phys_bits is true */
21949348028eSPhilippe Mathieu-Daudé uint8_t host_phys_bits_limit;
21959348028eSPhilippe Mathieu-Daudé
21969348028eSPhilippe Mathieu-Daudé /* Forcefully disable KVM PV features not exposed in guest CPUIDs */
21979348028eSPhilippe Mathieu-Daudé bool kvm_pv_enforce_cpuid;
21989348028eSPhilippe Mathieu-Daudé
21999348028eSPhilippe Mathieu-Daudé /* Number of physical address bits supported */
22009348028eSPhilippe Mathieu-Daudé uint32_t phys_bits;
22019348028eSPhilippe Mathieu-Daudé
22029348028eSPhilippe Mathieu-Daudé /*
22039348028eSPhilippe Mathieu-Daudé * Number of guest physical address bits available. Usually this is
22049348028eSPhilippe Mathieu-Daudé * identical to host physical address bits. With NPT or EPT 4-level
22059348028eSPhilippe Mathieu-Daudé * paging, guest physical address space might be restricted to 48 bits
22069348028eSPhilippe Mathieu-Daudé * even if the host cpu supports more physical address bits.
22079348028eSPhilippe Mathieu-Daudé */
22089348028eSPhilippe Mathieu-Daudé uint32_t guest_phys_bits;
22099348028eSPhilippe Mathieu-Daudé
22109348028eSPhilippe Mathieu-Daudé /* in order to simplify APIC support, we leave this pointer to the
22119348028eSPhilippe Mathieu-Daudé user */
2212fcf5ef2aSThomas Huth struct DeviceState *apic_state;
2213fcf5ef2aSThomas Huth struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
2214ac701a4fSKeqian Zhu Notifier machine_done;
2215fcf5ef2aSThomas Huth
2216fcf5ef2aSThomas Huth struct kvm_msrs *kvm_msr_buf;
221792d5f1a4SPaolo Bonzini
2218fcf5ef2aSThomas Huth int32_t node_id; /* NUMA node this CPU belongs to */
2219fcf5ef2aSThomas Huth int32_t socket_id;
22201af0006aSJanosch Frank int32_t die_id;
2221fcf5ef2aSThomas Huth int32_t module_id;
22221af0006aSJanosch Frank int32_t core_id;
2223fcf5ef2aSThomas Huth int32_t thread_id;
22241af0006aSJanosch Frank
2225fcf5ef2aSThomas Huth int32_t hv_max_vps;
22261af0006aSJanosch Frank
2227fcf5ef2aSThomas Huth bool xen_vapic;
22288a5b974bSMarc-André Lureau };
2229fcf5ef2aSThomas Huth
2230fcf5ef2aSThomas Huth typedef struct X86CPUModel X86CPUModel;
223190c84c56SMarkus Armbruster
2232fcf5ef2aSThomas Huth /**
2233a010bdbeSAlex Bennée * X86CPUClass:
2234fcf5ef2aSThomas Huth * @cpu_def: CPU model definition
2235fcf5ef2aSThomas Huth * @host_cpuid_required: Whether CPU model requires cpuid from host.
22360442428aSMarkus Armbruster * @ordering: Ordering on the "-cpu help" CPU model list.
2237fcf5ef2aSThomas Huth * @migration_safe: See CpuDefinitionInfo::migration_safe
2238fcf5ef2aSThomas Huth * @static_model: See CpuDefinitionInfo::static
223976d0042bSPhilippe Mathieu-Daudé * @parent_realize: The parent class' realize handler.
22406d2d454aSPhilippe Mathieu-Daudé * @parent_phases: The parent class' reset phase handlers.
22416d2d454aSPhilippe Mathieu-Daudé *
2242fcf5ef2aSThomas Huth * An x86 CPU model or family.
22437ce08865SPhilippe Mathieu-Daudé */
2244bad5cfcdSMichael Tokarev struct X86CPUClass {
22456f529b75SPaolo Bonzini CPUClass parent_class;
224683a3d9c7SClaudio Fontana
2247bf13bfabSPaolo Bonzini /*
224883a3d9c7SClaudio Fontana * CPU definition, automatically loaded by instance_init if not NULL.
22497ce08865SPhilippe Mathieu-Daudé * Should be eventually replaced by subclass-specific property defaults.
225083a3d9c7SClaudio Fontana */
22515e76d84eSPaolo Bonzini X86CPUModel *model;
22525e76d84eSPaolo Bonzini
2253fcf5ef2aSThomas Huth bool host_cpuid_required;
2254fcf5ef2aSThomas Huth int ordering;
2255fcf5ef2aSThomas Huth bool migration_safe;
2256fcf5ef2aSThomas Huth bool static_model;
2257c117e5b1SPhilippe Mathieu-Daudé
2258fcf5ef2aSThomas Huth /*
2259fcf5ef2aSThomas Huth * Optional description of CPU model.
2260fcf5ef2aSThomas Huth * If unavailable, cpu_def->model_id is used.
2261fcf5ef2aSThomas Huth */
2262fcf5ef2aSThomas Huth const char *model_description;
2263fcf5ef2aSThomas Huth
2264fcf5ef2aSThomas Huth DeviceRealize parent_realize;
2265fcf5ef2aSThomas Huth DeviceUnrealize parent_unrealize;
2266fcf5ef2aSThomas Huth ResettablePhases parent_phases;
2267fcf5ef2aSThomas Huth };
2268fcf5ef2aSThomas Huth
2269fcf5ef2aSThomas Huth #ifndef CONFIG_USER_ONLY
2270fcf5ef2aSThomas Huth extern const VMStateDescription vmstate_x86_cpu;
2271fcf5ef2aSThomas Huth #endif
2272fcf5ef2aSThomas Huth
2273fcf5ef2aSThomas Huth int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
2274fcf5ef2aSThomas Huth
2275fcf5ef2aSThomas Huth int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
2276fcf5ef2aSThomas Huth int cpuid, DumpState *s);
2277fcf5ef2aSThomas Huth int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
2278fcf5ef2aSThomas Huth int cpuid, DumpState *s);
2279fcf5ef2aSThomas Huth int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
2280fcf5ef2aSThomas Huth DumpState *s);
2281fcf5ef2aSThomas Huth int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
2282fcf5ef2aSThomas Huth DumpState *s);
2283fcf5ef2aSThomas Huth
2284fcf5ef2aSThomas Huth bool x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
2285fcf5ef2aSThomas Huth Error **errp);
2286fcf5ef2aSThomas Huth
2287fcf5ef2aSThomas Huth void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
2288fcf5ef2aSThomas Huth
2289fcf5ef2aSThomas Huth int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
2290fcf5ef2aSThomas Huth int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
2291fcf5ef2aSThomas Huth void x86_cpu_gdb_init(CPUState *cs);
2292fcf5ef2aSThomas Huth
2293fcf5ef2aSThomas Huth void x86_cpu_list(void);
2294fcf5ef2aSThomas Huth int cpu_x86_support_mca_broadcast(CPUX86State *env);
22955e76d84eSPaolo Bonzini
22965e76d84eSPaolo Bonzini #ifndef CONFIG_USER_ONLY
2297fcf5ef2aSThomas Huth hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
2298fcf5ef2aSThomas Huth MemTxAttrs *attrs);
2299fcf5ef2aSThomas Huth int cpu_get_pic_interrupt(CPUX86State *s);
2300fcf5ef2aSThomas Huth
2301fcf5ef2aSThomas Huth /* MS-DOS compatibility mode FPU exception support */
2302fcf5ef2aSThomas Huth void x86_register_ferr_irq(qemu_irq irq);
2303fcf5ef2aSThomas Huth void fpu_check_raise_ferr_irq(CPUX86State *s);
2304fcf5ef2aSThomas Huth void cpu_set_ignne(void);
2305fcf5ef2aSThomas Huth void cpu_clear_ignne(void);
2306fcf5ef2aSThomas Huth #endif
2307fcf5ef2aSThomas Huth
2308fcf5ef2aSThomas Huth /* mpx_helper.c */
2309fcf5ef2aSThomas Huth void cpu_sync_bndcs_hflags(CPUX86State *env);
2310fcf5ef2aSThomas Huth
2311fcf5ef2aSThomas Huth /* this function must always be used to load data in the segment
2312fcf5ef2aSThomas Huth cache: it synchronizes the hflags with the segment cache values */
2313fcf5ef2aSThomas Huth static inline void cpu_x86_load_seg_cache(CPUX86State *env,
2314fcf5ef2aSThomas Huth X86Seg seg_reg, unsigned int selector,
2315fcf5ef2aSThomas Huth target_ulong base,
2316fcf5ef2aSThomas Huth unsigned int limit,
2317fcf5ef2aSThomas Huth unsigned int flags)
2318fcf5ef2aSThomas Huth {
2319fcf5ef2aSThomas Huth SegmentCache *sc;
2320fcf5ef2aSThomas Huth unsigned int new_hflags;
2321fcf5ef2aSThomas Huth
2322fcf5ef2aSThomas Huth sc = &env->segs[seg_reg];
2323fcf5ef2aSThomas Huth sc->selector = selector;
2324fcf5ef2aSThomas Huth sc->base = base;
2325fcf5ef2aSThomas Huth sc->limit = limit;
2326fcf5ef2aSThomas Huth sc->flags = flags;
2327fcf5ef2aSThomas Huth
2328fcf5ef2aSThomas Huth /* update the hidden flags */
2329fcf5ef2aSThomas Huth {
2330fcf5ef2aSThomas Huth if (seg_reg == R_CS) {
2331fcf5ef2aSThomas Huth #ifdef TARGET_X86_64
2332fcf5ef2aSThomas Huth if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
2333fcf5ef2aSThomas Huth /* long mode */
2334fcf5ef2aSThomas Huth env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
2335fcf5ef2aSThomas Huth env->hflags &= ~(HF_ADDSEG_MASK);
2336fcf5ef2aSThomas Huth } else
2337fcf5ef2aSThomas Huth #endif
2338fcf5ef2aSThomas Huth {
2339fcf5ef2aSThomas Huth /* legacy / compatibility case */
2340fcf5ef2aSThomas Huth new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
2341fcf5ef2aSThomas Huth >> (DESC_B_SHIFT - HF_CS32_SHIFT);
2342fcf5ef2aSThomas Huth env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
2343fcf5ef2aSThomas Huth new_hflags;
234476d8d0f8SRichard Henderson }
234576d8d0f8SRichard Henderson }
234676d8d0f8SRichard Henderson if (seg_reg == R_SS) {
234776d8d0f8SRichard Henderson int cpl = (flags >> DESC_DPL_SHIFT) & 3;
2348c117e5b1SPhilippe Mathieu-Daudé #if HF_CPL_MASK != 3
234976d8d0f8SRichard Henderson #error HF_CPL_MASK is hardcoded
235076d8d0f8SRichard Henderson #endif
23519c2fb9e1SRichard Henderson env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
23529c2fb9e1SRichard Henderson /* Possibly switch between BNDCFGS and BNDCFGU */
2353701890bdSRichard Henderson cpu_sync_bndcs_hflags(env);
2354701890bdSRichard Henderson }
2355fcf5ef2aSThomas Huth new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
2356fcf5ef2aSThomas Huth >> (DESC_B_SHIFT - HF_SS32_SHIFT);
2357f5cc5a5cSClaudio Fontana if (env->hflags & HF_CS64_MASK) {
2358f5cc5a5cSClaudio Fontana /* zero base assumed for DS, ES and SS in long mode */
2359f5cc5a5cSClaudio Fontana } else if (!(env->cr[0] & CR0_PE_MASK) ||
2360f5cc5a5cSClaudio Fontana (env->eflags & VM_MASK) ||
2361f5cc5a5cSClaudio Fontana !(env->hflags & HF_CS32_MASK)) {
2362f5cc5a5cSClaudio Fontana /* XXX: try to avoid this test. The problem comes from the
2363f5cc5a5cSClaudio Fontana fact that in real mode or vm86 mode we only modify the
2364ec19444aSMaciej S. Szmigiero 'base' and 'selector' fields of the segment cache to go
2365ec19444aSMaciej S. Szmigiero faster. A solution may be to force addseg to one in
236697afb47eSLara Lazier translate-i386.c. */
236797afb47eSLara Lazier new_hflags |= HF_ADDSEG_MASK;
2368f5cc5a5cSClaudio Fontana } else {
2369fcf5ef2aSThomas Huth new_hflags |= ((env->segs[R_DS].base |
2370fcf5ef2aSThomas Huth env->segs[R_ES].base |
2371fcf5ef2aSThomas Huth env->segs[R_SS].base) != 0) <<
2372fcf5ef2aSThomas Huth HF_ADDSEG_SHIFT;
2373b5ee0468SBui Quang Minh }
2374fcf5ef2aSThomas Huth env->hflags = (env->hflags &
2375fcf5ef2aSThomas Huth ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
2376b5ee0468SBui Quang Minh }
2377fcf5ef2aSThomas Huth }
2378fcf5ef2aSThomas Huth
2379fcf5ef2aSThomas Huth static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
2380608db8dbSPaul Brook uint8_t sipi_vector)
2381fcf5ef2aSThomas Huth {
2382fcf5ef2aSThomas Huth CPUState *cs = CPU(cpu);
2383f8c45c65SPaolo Bonzini CPUX86State *env = &cpu->env;
2384f8c45c65SPaolo Bonzini
2385f8c45c65SPaolo Bonzini env->eip = 0;
2386f8c45c65SPaolo Bonzini cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
2387f8c45c65SPaolo Bonzini sipi_vector << 12,
2388f8c45c65SPaolo Bonzini env->segs[R_CS].limit,
2389f8c45c65SPaolo Bonzini env->segs[R_CS].flags);
2390f8c45c65SPaolo Bonzini cs->halted = 0;
2391f8c45c65SPaolo Bonzini }
2392f8c45c65SPaolo Bonzini
239363087289SClaudio Fontana int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
239463087289SClaudio Fontana target_ulong *base, unsigned int *limit,
239563087289SClaudio Fontana unsigned int *flags);
239663087289SClaudio Fontana
239763087289SClaudio Fontana /* op_helper.c */
2398fcf5ef2aSThomas Huth /* used for debug or cpu save/restore */
2399fcf5ef2aSThomas Huth
2400fcf5ef2aSThomas Huth /* cpu-exec.c */
2401fcf5ef2aSThomas Huth /*
2402fcf5ef2aSThomas Huth * The following helpers are only usable in user mode simulation.
2403fcf5ef2aSThomas Huth * The host pointers should come from lock_user().
2404fcf5ef2aSThomas Huth */
2405fcf5ef2aSThomas Huth void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector);
2406fcf5ef2aSThomas Huth void cpu_x86_fsave(CPUX86State *s, void *host, size_t len);
2407fcf5ef2aSThomas Huth void cpu_x86_frstor(CPUX86State *s, void *host, size_t len);
2408fcf5ef2aSThomas Huth void cpu_x86_fxsave(CPUX86State *s, void *host, size_t len);
2409fcf5ef2aSThomas Huth void cpu_x86_fxrstor(CPUX86State *s, void *host, size_t len);
2410fcf5ef2aSThomas Huth void cpu_x86_xsave(CPUX86State *s, void *host, size_t len, uint64_t rbfm);
2411fcf5ef2aSThomas Huth bool cpu_x86_xrstor(CPUX86State *s, void *host, size_t len, uint64_t rbfm);
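/*
 * Illustrative sketch only (not part of the API above): in user-mode
 * emulation a caller maps the guest buffer with lock_user() before handing
 * the host pointer to one of these helpers. For a 512-byte FXSAVE area the
 * pattern is roughly (env, guest_addr are hypothetical locals):
 *
 *     void *host = lock_user(VERIFY_WRITE, guest_addr, 512, 0);
 *     if (host) {
 *         cpu_x86_fxsave(env, host, 512);
 *         unlock_user(host, guest_addr, 512);
 *     }
 */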
2412fcf5ef2aSThomas Huth
2413fcf5ef2aSThomas Huth /* cpu.c */
2414fcf5ef2aSThomas Huth void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
2415fcf5ef2aSThomas Huth uint32_t vendor2, uint32_t vendor3);
2416fcf5ef2aSThomas Huth typedef struct PropValue {
2417fcf5ef2aSThomas Huth const char *prop, *value;
24180dacec87SIgor Mammedov } PropValue;
2419311ca98dSIgor Mammedov void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
2420311ca98dSIgor Mammedov
2421311ca98dSIgor Mammedov void x86_cpu_after_reset(X86CPU *cpu);
2422311ca98dSIgor Mammedov
2423311ca98dSIgor Mammedov uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
2424311ca98dSIgor Mammedov
2425311ca98dSIgor Mammedov /* cpu.c other functions (cpuid) */
2426fcf5ef2aSThomas Huth void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2427fcf5ef2aSThomas Huth uint32_t *eax, uint32_t *ebx,
2428fcf5ef2aSThomas Huth uint32_t *ecx, uint32_t *edx);
242990f64153SPaolo Bonzini void cpu_clear_apic_feature(CPUX86State *env);
243090f64153SPaolo Bonzini void cpu_set_apic_feature(CPUX86State *env);
243190f64153SPaolo Bonzini void host_cpuid(uint32_t function, uint32_t count,
243290f64153SPaolo Bonzini uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
243390f64153SPaolo Bonzini bool cpu_has_x2apic_feature(CPUX86State *env);
243490f64153SPaolo Bonzini
243590f64153SPaolo Bonzini /* helper.c */
243690f64153SPaolo Bonzini void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
243790f64153SPaolo Bonzini void cpu_sync_avx_hflag(CPUX86State *env);
243890f64153SPaolo Bonzini
243990f64153SPaolo Bonzini #ifndef CONFIG_USER_ONLY
244090f64153SPaolo Bonzini static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
244190f64153SPaolo Bonzini {
244290f64153SPaolo Bonzini return !!attrs.secure;
244390f64153SPaolo Bonzini }
244490f64153SPaolo Bonzini
244598281984SRichard Henderson static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
24465f97afe2SPaolo Bonzini {
24475f97afe2SPaolo Bonzini return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
244890f64153SPaolo Bonzini }
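/*
 * Illustrative sketch only: physical accesses made on behalf of the vCPU
 * (see the x86_ld*_phys / x86_st*_phys helpers declared below) derive the
 * target address space from the current SMM state, roughly as
 *
 *     MemTxAttrs attrs = cpu_get_mem_attrs(env);
 *     AddressSpace *as = cpu_addressspace(cs, attrs);
 *
 * so that accesses performed while in SMM are routed to the SMRAM view.
 */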
24495f97afe2SPaolo Bonzini
24505f97afe2SPaolo Bonzini /*
24515f97afe2SPaolo Bonzini * load efer and update the corresponding hflags. XXX: do consistency
24525f97afe2SPaolo Bonzini * checks with cpuid bits?
245390f64153SPaolo Bonzini */
24545f97afe2SPaolo Bonzini void cpu_load_efer(CPUX86State *env, uint64_t val);
24555f97afe2SPaolo Bonzini uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
2456b1661801SPaolo Bonzini uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
2457b1661801SPaolo Bonzini uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
2458b1661801SPaolo Bonzini uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
2459b1661801SPaolo Bonzini void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
2460b1661801SPaolo Bonzini void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
2461b1661801SPaolo Bonzini void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
2462fffe424bSRichard Henderson void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
2463fffe424bSRichard Henderson void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
2464fcf5ef2aSThomas Huth #endif
2465fcf5ef2aSThomas Huth
2466fcf5ef2aSThomas Huth /* these will eventually be removed */
2467fcf5ef2aSThomas Huth void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
2468fcf5ef2aSThomas Huth void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
2469fcf5ef2aSThomas Huth void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
2470fcf5ef2aSThomas Huth void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
2471fcf5ef2aSThomas Huth
2472fcf5ef2aSThomas Huth /* hw/pc.c */
2473fcf5ef2aSThomas Huth uint64_t cpu_get_tsc(CPUX86State *env);
2474fcf5ef2aSThomas Huth
2475fcf5ef2aSThomas Huth #define CPU_RESOLVING_TYPE TYPE_X86_CPU
2476fcf5ef2aSThomas Huth
2477bb5de525SAnton Johansson #ifdef TARGET_X86_64
2478bb5de525SAnton Johansson #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
2479fcf5ef2aSThomas Huth #else
2480fcf5ef2aSThomas Huth #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
2481fcf5ef2aSThomas Huth #endif
2482b5e0d5d2SRichard Henderson
2483b5e0d5d2SRichard Henderson #define cpu_list x86_cpu_list
2484b5e0d5d2SRichard Henderson
2485b5e0d5d2SRichard Henderson /* MMU modes definitions */
2486b5e0d5d2SRichard Henderson #define MMU_KSMAP64_IDX 0
2487b5e0d5d2SRichard Henderson #define MMU_KSMAP32_IDX 1
2488b5e0d5d2SRichard Henderson #define MMU_USER64_IDX 2
2489fcf5ef2aSThomas Huth #define MMU_USER32_IDX 3
2490fcf5ef2aSThomas Huth #define MMU_KNOSMAP64_IDX 4
2491fcf5ef2aSThomas Huth #define MMU_KNOSMAP32_IDX 5
2492fcf5ef2aSThomas Huth #define MMU_PHYS_IDX 6
2493fcf5ef2aSThomas Huth #define MMU_NESTED_IDX 7
2494fcf5ef2aSThomas Huth
2495fcf5ef2aSThomas Huth #ifdef CONFIG_USER_ONLY
2496fcf5ef2aSThomas Huth #ifdef TARGET_X86_64
2497fcf5ef2aSThomas Huth #define MMU_USER_IDX MMU_USER64_IDX
2498fcf5ef2aSThomas Huth #else
2499fcf5ef2aSThomas Huth #define MMU_USER_IDX MMU_USER32_IDX
25002455e9cfSPaolo Bonzini #endif
2501fcf5ef2aSThomas Huth #endif
2502fcf5ef2aSThomas Huth
2503fcf5ef2aSThomas Huth static inline bool is_mmu_index_smap(int mmu_index)
250479c664f6SYang Zhong {
250579c664f6SYang Zhong return (mmu_index & ~1) == MMU_KSMAP64_IDX;
25062455e9cfSPaolo Bonzini }
250779c664f6SYang Zhong
250879c664f6SYang Zhong static inline bool is_mmu_index_user(int mmu_index)
2509fcf5ef2aSThomas Huth {
2510fcf5ef2aSThomas Huth return (mmu_index & ~1) == MMU_USER64_IDX;
2511fcf5ef2aSThomas Huth }
2512fcf5ef2aSThomas Huth
2513fcf5ef2aSThomas Huth static inline bool is_mmu_index_32(int mmu_index)
2514fcf5ef2aSThomas Huth {
2515fcf5ef2aSThomas Huth assert(mmu_index < MMU_PHYS_IDX);
2516c8bc83a4SPaolo Bonzini return mmu_index & 1;
2517c8bc83a4SPaolo Bonzini }
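/*
 * A minimal compile-time sketch of the pairing the is_mmu_index_*() helpers
 * above rely on: each 64-bit MMU index is immediately followed by its 32-bit
 * counterpart, so bit 0 selects the 32-bit variant. This assumes
 * QEMU_BUILD_BUG_ON (from qemu/osdep.h) is in scope, as it is for normal
 * QEMU translation units; the checks are illustrative rather than required.
 */
QEMU_BUILD_BUG_ON(MMU_KSMAP32_IDX != MMU_KSMAP64_IDX + 1);
QEMU_BUILD_BUG_ON(MMU_USER32_IDX != MMU_USER64_IDX + 1);
QEMU_BUILD_BUG_ON(MMU_KNOSMAP32_IDX != MMU_KNOSMAP64_IDX + 1);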
2518c8bc83a4SPaolo Bonzini
2519c8bc83a4SPaolo Bonzini int x86_mmu_index_pl(CPUX86State *env, unsigned pl);
2520c8bc83a4SPaolo Bonzini int cpu_mmu_index_kernel(CPUX86State *env);
2521c8bc83a4SPaolo Bonzini
2522c8bc83a4SPaolo Bonzini #define CC_DST (env->cc_dst)
2523c8bc83a4SPaolo Bonzini #define CC_SRC (env->cc_src)
2524c8bc83a4SPaolo Bonzini #define CC_SRC2 (env->cc_src2)
252518ab37baSLiran Alon #define CC_OP (env->cc_op)
252618ab37baSLiran Alon
252718ab37baSLiran Alon #include "exec/cpu-all.h"
252818ab37baSLiran Alon #include "svm.h"
252918ab37baSLiran Alon
2530b16c0e20SPaolo Bonzini #if !defined(CONFIG_USER_ONLY)
2531b16c0e20SPaolo Bonzini #include "hw/i386/apic.h"
2532b16c0e20SPaolo Bonzini #endif
2533b16c0e20SPaolo Bonzini
2534b16c0e20SPaolo Bonzini static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
253579a197abSLiran Alon uint64_t *cs_base, uint32_t *flags)
253679a197abSLiran Alon {
253779a197abSLiran Alon *flags = env->hflags |
253879a197abSLiran Alon (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
253979a197abSLiran Alon if (env->hflags & HF_CS64_MASK) {
254079a197abSLiran Alon *cs_base = 0;
254179a197abSLiran Alon *pc = env->eip;
254279a197abSLiran Alon } else {
254379a197abSLiran Alon *cs_base = env->segs[R_CS].base;
254479a197abSLiran Alon *pc = (uint32_t)(*cs_base + env->eip);
254579a197abSLiran Alon }
254679a197abSLiran Alon }
254779a197abSLiran Alon
254879a197abSLiran Alon void do_cpu_init(X86CPU *cpu);
254979a197abSLiran Alon
255079a197abSLiran Alon #define MCE_INJECT_BROADCAST 1
255179a197abSLiran Alon #define MCE_INJECT_UNCOND_AO 2
255279a197abSLiran Alon
255379a197abSLiran Alon void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
255479a197abSLiran Alon uint64_t status, uint64_t mcg_status, uint64_t addr,
255579a197abSLiran Alon uint64_t misc, int flags);
255679a197abSLiran Alon
2557616a89eaSPaolo Bonzini uint32_t cpu_cc_compute_all(CPUX86State *env1);
2558616a89eaSPaolo Bonzini
2559616a89eaSPaolo Bonzini static inline uint32_t cpu_compute_eflags(CPUX86State *env)
2560fcf5ef2aSThomas Huth {
25611d8ad165SYang Zhong uint32_t eflags = env->eflags;
25621d8ad165SYang Zhong if (tcg_enabled()) {
2563418b0f93SJoseph Myers eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK);
25641d8ad165SYang Zhong }
25651d8ad165SYang Zhong return eflags;
25661d8ad165SYang Zhong }
25671d8ad165SYang Zhong
25681d8ad165SYang Zhong static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
25691d8ad165SYang Zhong {
25701d8ad165SYang Zhong return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
25711d8ad165SYang Zhong }
25721d8ad165SYang Zhong
25731d8ad165SYang Zhong static inline int32_t x86_get_a20_mask(CPUX86State *env)
25741d8ad165SYang Zhong {
25751d8ad165SYang Zhong if (env->hflags & HF_SMM_MASK) {
25761d8ad165SYang Zhong return -1;
25771d8ad165SYang Zhong } else {
25781d8ad165SYang Zhong return env->a20_mask;
25791d8ad165SYang Zhong }
2580fcf5ef2aSThomas Huth }
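/*
 * Illustrative only (not an API addition): callers that compute guest
 * physical addresses typically apply the A20 mask before touching memory,
 * along the lines of
 *
 *     hwaddr masked = addr & x86_get_a20_mask(env);
 */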
2581fcf5ef2aSThomas Huth
258227bd3216SRichard Henderson static inline bool cpu_has_vmx(CPUX86State *env)
258327bd3216SRichard Henderson {
258427bd3216SRichard Henderson return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
258527bd3216SRichard Henderson }
258627bd3216SRichard Henderson
2587813c6459SLara Lazier static inline bool cpu_has_svm(CPUX86State *env)
2588813c6459SLara Lazier {
2589813c6459SLara Lazier return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
259027bd3216SRichard Henderson }
2591fcf5ef2aSThomas Huth
259265c9d60aSPaolo Bonzini /*
2593813c6459SLara Lazier * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
259427bd3216SRichard Henderson * Since it was set, CR4.VMXE must remain set as long as vCPU is in
259527bd3216SRichard Henderson * VMX operation. This is because CR4.VMXE is one of the bits set
2596fcf5ef2aSThomas Huth * in MSR_IA32_VMX_CR4_FIXED1.
2597fcf5ef2aSThomas Huth *
2598fcf5ef2aSThomas Huth * There is one exception to the above statement, when a vCPU enters SMM mode.
2599fcf5ef2aSThomas Huth * When a vCPU enters SMM mode, it temporarily exits VMX operation and
2600fcf5ef2aSThomas Huth * may also reset CR4.VMXE during execution in SMM mode.
2601dcafd1efSEduardo Habkost * When the vCPU exits SMM mode, its state is restored to be in VMX operation
2602dcafd1efSEduardo Habkost * and CR4.VMXE is restored to its original value of being set.
2603dcafd1efSEduardo Habkost *
2604dcafd1efSEduardo Habkost * Therefore, when vCPU is not in SMM mode, we can infer whether
2605dcafd1efSEduardo Habkost * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
26060788a56bSEduardo Habkost * know for certain.
26070788a56bSEduardo Habkost */
26080788a56bSEduardo Habkost static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
26090788a56bSEduardo Habkost {
26100788a56bSEduardo Habkost return cpu_has_vmx(env) &&
26110788a56bSEduardo Habkost ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
2612dcafd1efSEduardo Habkost }
2613dcafd1efSEduardo Habkost
2614dcafd1efSEduardo Habkost /* excp_helper.c */
2615dcafd1efSEduardo Habkost int get_pg_mode(CPUX86State *env);
2616dcafd1efSEduardo Habkost
26170788a56bSEduardo Habkost /* fpu_helper.c */
26180788a56bSEduardo Habkost
26190788a56bSEduardo Habkost /* Set all non-runtime-variable float_status fields to x86 handling */
26200788a56bSEduardo Habkost void cpu_init_fp_statuses(CPUX86State *env);
26210788a56bSEduardo Habkost void update_fp_status(CPUX86State *env);
26220788a56bSEduardo Habkost void update_mxcsr_status(CPUX86State *env);
2623b5c6a3c1SPhilippe Mathieu-Daudé void update_mxcsr_from_sse_status(CPUX86State *env);
2624b5c6a3c1SPhilippe Mathieu-Daudé
26253b8484c5SPhilippe Mathieu-Daudé static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
26263b8484c5SPhilippe Mathieu-Daudé {
2627fcf5ef2aSThomas Huth env->mxcsr = mxcsr;
2628fcf5ef2aSThomas Huth if (tcg_enabled()) {
2629fcf5ef2aSThomas Huth update_mxcsr_status(env);
26300c36af8cSClaudio Fontana }
2631d3fd9e4bSMarkus Armbruster }
2632fcf5ef2aSThomas Huth
2633b5c6a3c1SPhilippe Mathieu-Daudé static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
2634b5c6a3c1SPhilippe Mathieu-Daudé {
2635fcf5ef2aSThomas Huth env->fpuc = fpuc;
2636fcf5ef2aSThomas Huth if (tcg_enabled()) {
2637fcf5ef2aSThomas Huth update_fp_status(env);
2638c0198c5fSDavid Edmondson }
2639c0198c5fSDavid Edmondson }
26405d245678SPaolo Bonzini
264135b1b927STao Wu /* svm_helper.c */
264235b1b927STao Wu #ifdef CONFIG_USER_ONLY
26432d384d7cSVitaly Kuznetsov static inline void
26442d384d7cSVitaly Kuznetsov cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
26452d384d7cSVitaly Kuznetsov uint64_t param, uintptr_t retaddr)
26462d384d7cSVitaly Kuznetsov { /* no-op */ }
26472d384d7cSVitaly Kuznetsov static inline bool
2648213ff024SLara Lazier cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
2649213ff024SLara Lazier { return false; }
2650213ff024SLara Lazier #else
2651213ff024SLara Lazier void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
2652213ff024SLara Lazier uint64_t param, uintptr_t retaddr);
2653213ff024SLara Lazier bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type);
2654213ff024SLara Lazier #endif
2655213ff024SLara Lazier
2656213ff024SLara Lazier /* apic.c */
2657213ff024SLara Lazier void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
2658213ff024SLara Lazier void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
2659213ff024SLara Lazier TPRAccess access);
2660213ff024SLara Lazier
2661213ff024SLara Lazier /* Special values for X86CPUVersion: */
2662213ff024SLara Lazier
2663213ff024SLara Lazier /* Resolve to latest CPU version */
2664213ff024SLara Lazier #define CPU_VERSION_LATEST -1
2665213ff024SLara Lazier
2666213ff024SLara Lazier /*
2667213ff024SLara Lazier * Resolve to version defined by current machine type.
2668213ff024SLara Lazier * See x86_cpu_set_default_version()
2669213ff024SLara Lazier */
2670213ff024SLara Lazier #define CPU_VERSION_AUTO -2
2671213ff024SLara Lazier
2672213ff024SLara Lazier /* Don't resolve to any versioned CPU models, like old QEMU versions */
2673213ff024SLara Lazier #define CPU_VERSION_LEGACY 0
2674213ff024SLara Lazier
267501170671SBinbin Wu typedef int X86CPUVersion;
267601170671SBinbin Wu
267701170671SBinbin Wu /*
2678f88ddc40SXin Li * Set default CPU model version for CPU models having
2679f88ddc40SXin Li * version == CPU_VERSION_AUTO.
2680f88ddc40SXin Li */
2681213ff024SLara Lazier void x86_cpu_set_default_version(X86CPUVersion version);
2682213ff024SLara Lazier
2683213ff024SLara Lazier #ifndef CONFIG_USER_ONLY
26847760bb06SLara Lazier
26857760bb06SLara Lazier void do_cpu_sipi(X86CPU *cpu);
26867760bb06SLara Lazier
26877760bb06SLara Lazier #define APIC_DEFAULT_ADDRESS 0xfee00000
26887760bb06SLara Lazier #define APIC_SPACE_SIZE 0x100000
26897760bb06SLara Lazier
26907760bb06SLara Lazier /* cpu-dump.c */
26917760bb06SLara Lazier void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
26927760bb06SLara Lazier
26937760bb06SLara Lazier #endif
26947760bb06SLara Lazier
26957760bb06SLara Lazier /* cpu.c */
26967760bb06SLara Lazier bool cpu_is_bsp(X86CPU *cpu);
26977760bb06SLara Lazier
26987760bb06SLara Lazier void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
2699b26491b4SRichard Henderson void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
2700b26491b4SRichard Henderson uint32_t xsave_area_size(uint64_t mask, bool compacted);
2701b26491b4SRichard Henderson void x86_update_hflags(CPUX86State* env);
2702b26491b4SRichard Henderson
2703b26491b4SRichard Henderson static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
2704b26491b4SRichard Henderson {
2705fcf5ef2aSThomas Huth return !!(cpu->hyperv_features & BIT(feat));
2706 }
2707
2708 static inline uint64_t cr4_reserved_bits(CPUX86State *env)
2709 {
2710 uint64_t reserved_bits = CR4_RESERVED_MASK;
2711 if (!env->features[FEAT_XSAVE]) {
2712 reserved_bits |= CR4_OSXSAVE_MASK;
2713 }
2714 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
2715 reserved_bits |= CR4_SMEP_MASK;
2716 }
2717 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
2718 reserved_bits |= CR4_SMAP_MASK;
2719 }
2720 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
2721 reserved_bits |= CR4_FSGSBASE_MASK;
2722 }
2723 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
2724 reserved_bits |= CR4_PKE_MASK;
2725 }
2726 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
2727 reserved_bits |= CR4_LA57_MASK;
2728 }
2729 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
2730 reserved_bits |= CR4_UMIP_MASK;
2731 }
2732 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
2733 reserved_bits |= CR4_PKS_MASK;
2734 }
2735 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) {
2736 reserved_bits |= CR4_LAM_SUP_MASK;
2737 }
2738 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED)) {
2739 reserved_bits |= CR4_FRED_MASK;
2740 }
2741 return reserved_bits;
2742 }
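/*
 * Illustrative sketch only: a MOV-to-CR4 style check would treat a new CR4
 * value as valid only if it sets none of the bits that cr4_reserved_bits()
 * reports as reserved for this CPU model. The helper name below is
 * hypothetical and not part of the QEMU API.
 */
static inline bool cr4_value_is_valid(CPUX86State *env, uint64_t new_cr4)
{
    /* Setting any reserved bit would raise #GP on real hardware. */
    return (new_cr4 & cr4_reserved_bits(env)) == 0;
}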
2743
2744 static inline bool ctl_has_irq(CPUX86State *env)
2745 {
2746 uint32_t int_prio;
2747 uint32_t tpr;
2748
2749 int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
2750 tpr = env->int_ctl & V_TPR_MASK;
2751
2752 if (env->int_ctl & V_IGN_TPR_MASK) {
2753 return (env->int_ctl & V_IRQ_MASK);
2754 }
2755
2756 return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
2757 }
2758
2759 #if defined(TARGET_X86_64) && \
2760 defined(CONFIG_USER_ONLY) && \
2761 defined(CONFIG_LINUX)
2762 # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
2763 #endif
2764
2765 #endif /* I386_CPU_H */
2766