/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/types.h"

#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif

#include "disas/capstone.h"


/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)        ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
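
/* For example, AMD_ENC_ASSOC(8) encodes as 0x6 and AMD_ENC_ASSOC(ASSOC_FULL)
 * as 0xF (fully associative), matching the L2/L3 associativity encoding AMD
 * documents for CPUID Fn8000_0006; unsupported way counts encode as 0.
 */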


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512


static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

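/* The legacy feature-set macros above (I486_FEATURES through PPRO_FEATURES)
 * serve as the CPUID[1].EDX baselines for the built-in CPU models defined
 * later in this file: e.g. "486" uses I486_FEATURES as-is, "qemu32" uses
 * PPRO_FEATURES, and "qemu64" adds MTRR/CLFLUSH/MCA/PSE36 on top of it.
 */
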
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000008,
        .cpuid_reg = R_EBX,
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
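
/* Example: xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK) covers only the
 * legacy region plus the XSAVE header (512 + 64 = 576 bytes), while enabling
 * higher components (AVX, MPX, AVX-512, PKRU) grows the result to the end of
 * the highest-offset component selected by the mask.
 */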

static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
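
/* Example: on a GenuineIntel host, CPUID leaf 0 returns the vendor string in
 * the register order EBX, EDX, ECX ("Genu", "ineI", "ntel"), which is why
 * x86_cpu_vendor_words2str() is called with (ebx, edx, ecx) above; family and
 * model then combine the base and extended fields of leaf 1 EAX.
 */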

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
};

static X86CPUDefinition builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor = CPUID_VENDOR_AMD,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .name = "qemu32",
        .level = 4,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .xlevel = 0x80000004,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "kvm32",
        .level = 5,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 15,
        .model = 6,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .name = "coreduo",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
            CPUID_SS,
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 1,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 4,
        .model = 8,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            I486_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "pentium",
        .level = 1,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PENTIUM_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "pentium2",
        .level = 2,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            PENTIUM2_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "pentium3",
        .level = 3,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PENTIUM3_FEATURES,
        .xlevel = 0,
        .model_id = "",
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "n270",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 28,
        .stepping = 2,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
        /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
         * CPUID_EXT_XTPR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_MOVBE,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
    {
        .name = "Conroe",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 15,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
    },
    {
        .name = "Penryn",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 23,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
    },
    {
        .name = "Nehalem",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 26,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
    },
    {
        .name = "Nehalem-IBRS",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 26,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
    },
    {
        .name = "Westmere",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 44,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
    },
    {
        .name = "Westmere-IBRS",
        .level = 11,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 44,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
    },
    {
        .name = "SandyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        .name = "SandyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
    },
    {
        .name = "IvyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        .name = "IvyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    },
    {
        .name = "Haswell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        .name = "Haswell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
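    /* The "-IBRS" model variants in this table differ from their base models
     * by additionally exposing CPUID_7_0_EDX_SPEC_CTRL ("spec-ctrl") in
     * CPUID[EAX=7,ECX=0].EDX.
     */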
    {
        .name = "Haswell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Haswell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        .name = "Broadwell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        .name = "Broadwell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
    },
    {
        .name = "Broadwell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        .name = "Broadwell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, IBRS)",
    },
    {
        .name = "Skylake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        .name = "Skylake-Client-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
1690 */ 1691 .features[FEAT_XSAVE] = 1692 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1693 CPUID_XSAVE_XGETBV1, 1694 .features[FEAT_6_EAX] = 1695 CPUID_6_EAX_ARAT, 1696 .xlevel = 0x80000008, 1697 .model_id = "Intel Core Processor (Skylake, IBRS)", 1698 }, 1699 { 1700 .name = "Skylake-Server", 1701 .level = 0xd, 1702 .vendor = CPUID_VENDOR_INTEL, 1703 .family = 6, 1704 .model = 85, 1705 .stepping = 4, 1706 .features[FEAT_1_EDX] = 1707 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1708 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1709 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1710 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1711 CPUID_DE | CPUID_FP87, 1712 .features[FEAT_1_ECX] = 1713 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1714 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1715 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1716 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1717 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1718 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1719 .features[FEAT_8000_0001_EDX] = 1720 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 1721 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1722 .features[FEAT_8000_0001_ECX] = 1723 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1724 .features[FEAT_7_0_EBX] = 1725 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1726 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1727 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1728 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1729 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 1730 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 1731 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 1732 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 1733 /* Missing: XSAVES (not supported by some Linux versions, 1734 * including v4.1 to v4.12). 1735 * KVM doesn't yet expose any XSAVES state save component, 1736 * and the only one defined in Skylake (processor tracing) 1737 * probably will block migration anyway. 
1738 */ 1739 .features[FEAT_XSAVE] = 1740 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1741 CPUID_XSAVE_XGETBV1, 1742 .features[FEAT_6_EAX] = 1743 CPUID_6_EAX_ARAT, 1744 .xlevel = 0x80000008, 1745 .model_id = "Intel Xeon Processor (Skylake)", 1746 }, 1747 { 1748 .name = "Skylake-Server-IBRS", 1749 .level = 0xd, 1750 .vendor = CPUID_VENDOR_INTEL, 1751 .family = 6, 1752 .model = 85, 1753 .stepping = 4, 1754 .features[FEAT_1_EDX] = 1755 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1756 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1757 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1758 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1759 CPUID_DE | CPUID_FP87, 1760 .features[FEAT_1_ECX] = 1761 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1762 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1763 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1764 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1765 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1766 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1767 .features[FEAT_8000_0001_EDX] = 1768 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 1769 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1770 .features[FEAT_8000_0001_ECX] = 1771 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1772 .features[FEAT_7_0_EDX] = 1773 CPUID_7_0_EDX_SPEC_CTRL, 1774 .features[FEAT_7_0_EBX] = 1775 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1776 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1777 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1778 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1779 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 1780 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 1781 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 1782 CPUID_7_0_EBX_AVX512VL, 1783 /* Missing: XSAVES (not supported by some Linux versions, 1784 * including v4.1 to v4.12). 1785 * KVM doesn't yet expose any XSAVES state save component, 1786 * and the only one defined in Skylake (processor tracing) 1787 * probably will block migration anyway. 
1788 */ 1789 .features[FEAT_XSAVE] = 1790 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1791 CPUID_XSAVE_XGETBV1, 1792 .features[FEAT_6_EAX] = 1793 CPUID_6_EAX_ARAT, 1794 .xlevel = 0x80000008, 1795 .model_id = "Intel Xeon Processor (Skylake, IBRS)", 1796 }, 1797 { 1798 .name = "Opteron_G1", 1799 .level = 5, 1800 .vendor = CPUID_VENDOR_AMD, 1801 .family = 15, 1802 .model = 6, 1803 .stepping = 1, 1804 .features[FEAT_1_EDX] = 1805 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1806 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1807 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1808 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1809 CPUID_DE | CPUID_FP87, 1810 .features[FEAT_1_ECX] = 1811 CPUID_EXT_SSE3, 1812 .features[FEAT_8000_0001_EDX] = 1813 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1814 .xlevel = 0x80000008, 1815 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 1816 }, 1817 { 1818 .name = "Opteron_G2", 1819 .level = 5, 1820 .vendor = CPUID_VENDOR_AMD, 1821 .family = 15, 1822 .model = 6, 1823 .stepping = 1, 1824 .features[FEAT_1_EDX] = 1825 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1826 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1827 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1828 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1829 CPUID_DE | CPUID_FP87, 1830 .features[FEAT_1_ECX] = 1831 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 1832 /* Missing: CPUID_EXT2_RDTSCP */ 1833 .features[FEAT_8000_0001_EDX] = 1834 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1835 .features[FEAT_8000_0001_ECX] = 1836 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1837 .xlevel = 0x80000008, 1838 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 1839 }, 1840 { 1841 .name = "Opteron_G3", 1842 .level = 5, 1843 .vendor = CPUID_VENDOR_AMD, 1844 .family = 16, 1845 .model = 2, 1846 .stepping = 3, 1847 .features[FEAT_1_EDX] = 1848 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1849 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1850 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1851 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1852 CPUID_DE | CPUID_FP87, 1853 .features[FEAT_1_ECX] = 1854 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 1855 CPUID_EXT_SSE3, 1856 /* Missing: CPUID_EXT2_RDTSCP */ 1857 .features[FEAT_8000_0001_EDX] = 1858 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1859 .features[FEAT_8000_0001_ECX] = 1860 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 1861 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1862 .xlevel = 0x80000008, 1863 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 1864 }, 1865 { 1866 .name = "Opteron_G4", 1867 .level = 0xd, 1868 .vendor = CPUID_VENDOR_AMD, 1869 .family = 21, 1870 .model = 1, 1871 .stepping = 2, 1872 .features[FEAT_1_EDX] = 1873 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1874 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1875 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1876 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1877 CPUID_DE | CPUID_FP87, 1878 .features[FEAT_1_ECX] = 1879 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1880 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1881 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1882 CPUID_EXT_SSE3, 1883 /* Missing: CPUID_EXT2_RDTSCP */ 1884 .features[FEAT_8000_0001_EDX] = 1885 CPUID_EXT2_LM | 
CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 1886 CPUID_EXT2_SYSCALL, 1887 .features[FEAT_8000_0001_ECX] = 1888 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 1889 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 1890 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 1891 CPUID_EXT3_LAHF_LM, 1892 /* no xsaveopt! */ 1893 .xlevel = 0x8000001A, 1894 .model_id = "AMD Opteron 62xx class CPU", 1895 }, 1896 { 1897 .name = "Opteron_G5", 1898 .level = 0xd, 1899 .vendor = CPUID_VENDOR_AMD, 1900 .family = 21, 1901 .model = 2, 1902 .stepping = 0, 1903 .features[FEAT_1_EDX] = 1904 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1905 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1906 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1907 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1908 CPUID_DE | CPUID_FP87, 1909 .features[FEAT_1_ECX] = 1910 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 1911 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1912 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 1913 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1914 /* Missing: CPUID_EXT2_RDTSCP */ 1915 .features[FEAT_8000_0001_EDX] = 1916 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 1917 CPUID_EXT2_SYSCALL, 1918 .features[FEAT_8000_0001_ECX] = 1919 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 1920 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 1921 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 1922 CPUID_EXT3_LAHF_LM, 1923 /* no xsaveopt! */ 1924 .xlevel = 0x8000001A, 1925 .model_id = "AMD Opteron 63xx class CPU", 1926 }, 1927 { 1928 .name = "EPYC", 1929 .level = 0xd, 1930 .vendor = CPUID_VENDOR_AMD, 1931 .family = 23, 1932 .model = 1, 1933 .stepping = 2, 1934 .features[FEAT_1_EDX] = 1935 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 1936 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 1937 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 1938 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 1939 CPUID_VME | CPUID_FP87, 1940 .features[FEAT_1_ECX] = 1941 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 1942 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 1943 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1944 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 1945 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1946 .features[FEAT_8000_0001_EDX] = 1947 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 1948 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 1949 CPUID_EXT2_SYSCALL, 1950 .features[FEAT_8000_0001_ECX] = 1951 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 1952 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 1953 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1954 .features[FEAT_7_0_EBX] = 1955 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 1956 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 1957 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 1958 CPUID_7_0_EBX_SHA_NI, 1959 /* Missing: XSAVES (not supported by some Linux versions, 1960 * including v4.1 to v4.12). 1961 * KVM doesn't yet expose any XSAVES state save component. 
1962 */ 1963 .features[FEAT_XSAVE] = 1964 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1965 CPUID_XSAVE_XGETBV1, 1966 .features[FEAT_6_EAX] = 1967 CPUID_6_EAX_ARAT, 1968 .xlevel = 0x8000000A, 1969 .model_id = "AMD EPYC Processor", 1970 }, 1971 { 1972 .name = "EPYC-IBPB", 1973 .level = 0xd, 1974 .vendor = CPUID_VENDOR_AMD, 1975 .family = 23, 1976 .model = 1, 1977 .stepping = 2, 1978 .features[FEAT_1_EDX] = 1979 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 1980 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 1981 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 1982 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 1983 CPUID_VME | CPUID_FP87, 1984 .features[FEAT_1_ECX] = 1985 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 1986 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 1987 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1988 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 1989 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1990 .features[FEAT_8000_0001_EDX] = 1991 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 1992 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 1993 CPUID_EXT2_SYSCALL, 1994 .features[FEAT_8000_0001_ECX] = 1995 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 1996 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 1997 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1998 .features[FEAT_8000_0008_EBX] = 1999 CPUID_8000_0008_EBX_IBPB, 2000 .features[FEAT_7_0_EBX] = 2001 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2002 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2003 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2004 CPUID_7_0_EBX_SHA_NI, 2005 /* Missing: XSAVES (not supported by some Linux versions, 2006 * including v4.1 to v4.12). 2007 * KVM doesn't yet expose any XSAVES state save component. 2008 */ 2009 .features[FEAT_XSAVE] = 2010 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2011 CPUID_XSAVE_XGETBV1, 2012 .features[FEAT_6_EAX] = 2013 CPUID_6_EAX_ARAT, 2014 .xlevel = 0x8000000A, 2015 .model_id = "AMD EPYC Processor (with IBPB)", 2016 }, 2017 }; 2018 2019 typedef struct PropValue { 2020 const char *prop, *value; 2021 } PropValue; 2022 2023 /* KVM-specific features that are automatically added/removed 2024 * from all CPU models when KVM is enabled. 2025 */ 2026 static PropValue kvm_default_props[] = { 2027 { "kvmclock", "on" }, 2028 { "kvm-nopiodelay", "on" }, 2029 { "kvm-asyncpf", "on" }, 2030 { "kvm-steal-time", "on" }, 2031 { "kvm-pv-eoi", "on" }, 2032 { "kvmclock-stable-bit", "on" }, 2033 { "x2apic", "on" }, 2034 { "acpi", "off" }, 2035 { "monitor", "off" }, 2036 { "svm", "off" }, 2037 { NULL, NULL }, 2038 }; 2039 2040 /* TCG-specific defaults that override all CPU models when using TCG 2041 */ 2042 static PropValue tcg_default_props[] = { 2043 { "vme", "off" }, 2044 { NULL, NULL }, 2045 }; 2046 2047 2048 void x86_cpu_change_kvm_default(const char *prop, const char *value) 2049 { 2050 PropValue *pv; 2051 for (pv = kvm_default_props; pv->prop; pv++) { 2052 if (!strcmp(pv->prop, prop)) { 2053 pv->value = value; 2054 break; 2055 } 2056 } 2057 2058 /* It is valid to call this function only for properties that 2059 * are already present in the kvm_default_props table. 
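 * If the property is not found, the loop above stops at the {NULL, NULL} terminator and the assert below catches the misuse.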
2060 */ 2061 assert(pv->prop); 2062 } 2063 2064 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 2065 bool migratable_only); 2066 2067 static bool lmce_supported(void) 2068 { 2069 uint64_t mce_cap = 0; 2070 2071 #ifdef CONFIG_KVM 2072 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 2073 return false; 2074 } 2075 #endif 2076 2077 return !!(mce_cap & MCG_LMCE_P); 2078 } 2079 2080 #define CPUID_MODEL_ID_SZ 48 2081 2082 /** 2083 * cpu_x86_fill_model_id: 2084 * Get CPUID model ID string from host CPU. 2085 * 2086 * @str should have at least CPUID_MODEL_ID_SZ bytes 2087 * 2088 * The function does NOT add a null terminator to the string 2089 * automatically. 2090 */ 2091 static int cpu_x86_fill_model_id(char *str) 2092 { 2093 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 2094 int i; 2095 2096 for (i = 0; i < 3; i++) { 2097 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 2098 memcpy(str + i * 16 + 0, &eax, 4); 2099 memcpy(str + i * 16 + 4, &ebx, 4); 2100 memcpy(str + i * 16 + 8, &ecx, 4); 2101 memcpy(str + i * 16 + 12, &edx, 4); 2102 } 2103 return 0; 2104 } 2105 2106 static Property max_x86_cpu_properties[] = { 2107 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 2108 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 2109 DEFINE_PROP_END_OF_LIST() 2110 }; 2111 2112 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 2113 { 2114 DeviceClass *dc = DEVICE_CLASS(oc); 2115 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2116 2117 xcc->ordering = 9; 2118 2119 xcc->model_description = 2120 "Enables all features supported by the accelerator in the current host"; 2121 2122 dc->props = max_x86_cpu_properties; 2123 } 2124 2125 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp); 2126 2127 static void max_x86_cpu_initfn(Object *obj) 2128 { 2129 X86CPU *cpu = X86_CPU(obj); 2130 CPUX86State *env = &cpu->env; 2131 KVMState *s = kvm_state; 2132 2133 /* We can't fill the features array here because we don't know yet if 2134 * "migratable" is true or false. 
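 * The array is filled in later, once the property has been parsed (see x86_cpu_expand_features()).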
2135 */ 2136 cpu->max_features = true; 2137 2138 if (accel_uses_host_cpuid()) { 2139 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 2140 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 2141 int family, model, stepping; 2142 X86CPUDefinition host_cpudef = { }; 2143 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 2144 2145 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 2146 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx); 2147 2148 host_vendor_fms(vendor, &family, &model, &stepping); 2149 2150 cpu_x86_fill_model_id(model_id); 2151 2152 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 2153 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 2154 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 2155 object_property_set_int(OBJECT(cpu), stepping, "stepping", 2156 &error_abort); 2157 object_property_set_str(OBJECT(cpu), model_id, "model-id", 2158 &error_abort); 2159 2160 if (kvm_enabled()) { 2161 env->cpuid_min_level = 2162 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 2163 env->cpuid_min_xlevel = 2164 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 2165 env->cpuid_min_xlevel2 = 2166 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 2167 } else { 2168 env->cpuid_min_level = 2169 hvf_get_supported_cpuid(0x0, 0, R_EAX); 2170 env->cpuid_min_xlevel = 2171 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 2172 env->cpuid_min_xlevel2 = 2173 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 2174 } 2175 2176 if (lmce_supported()) { 2177 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 2178 } 2179 } else { 2180 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 2181 "vendor", &error_abort); 2182 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 2183 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 2184 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 2185 object_property_set_str(OBJECT(cpu), 2186 "QEMU TCG CPU version " QEMU_HW_VERSION, 2187 "model-id", &error_abort); 2188 } 2189 2190 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 2191 } 2192 2193 static const TypeInfo max_x86_cpu_type_info = { 2194 .name = X86_CPU_TYPE_NAME("max"), 2195 .parent = TYPE_X86_CPU, 2196 .instance_init = max_x86_cpu_initfn, 2197 .class_init = max_x86_cpu_class_init, 2198 }; 2199 2200 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 2201 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 2202 { 2203 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2204 2205 xcc->host_cpuid_required = true; 2206 xcc->ordering = 8; 2207 2208 if (kvm_enabled()) { 2209 xcc->model_description = 2210 "KVM processor with all supported host features "; 2211 } else if (hvf_enabled()) { 2212 xcc->model_description = 2213 "HVF processor with all supported host features "; 2214 } 2215 } 2216 2217 static const TypeInfo host_x86_cpu_type_info = { 2218 .name = X86_CPU_TYPE_NAME("host"), 2219 .parent = X86_CPU_TYPE_NAME("max"), 2220 .class_init = host_x86_cpu_class_init, 2221 }; 2222 2223 #endif 2224 2225 static void report_unavailable_features(FeatureWord w, uint32_t mask) 2226 { 2227 FeatureWordInfo *f = &feature_word_info[w]; 2228 int i; 2229 2230 for (i = 0; i < 32; ++i) { 2231 if ((1UL << i) & mask) { 2232 const char *reg = get_register_name_32(f->cpuid_reg); 2233 assert(reg); 2234 warn_report("%s doesn't support requested feature: " 2235 "CPUID.%02XH:%s%s%s [bit %d]", 2236 accel_uses_host_cpuid() ? "host" : "TCG", 2237 f->cpuid_eax, reg, 2238 f->feat_names[i] ? "." : "", 2239 f->feat_names[i] ? 
f->feat_names[i] : "", i); 2240 } 2241 } 2242 } 2243 2244 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 2245 const char *name, void *opaque, 2246 Error **errp) 2247 { 2248 X86CPU *cpu = X86_CPU(obj); 2249 CPUX86State *env = &cpu->env; 2250 int64_t value; 2251 2252 value = (env->cpuid_version >> 8) & 0xf; 2253 if (value == 0xf) { 2254 value += (env->cpuid_version >> 20) & 0xff; 2255 } 2256 visit_type_int(v, name, &value, errp); 2257 } 2258 2259 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 2260 const char *name, void *opaque, 2261 Error **errp) 2262 { 2263 X86CPU *cpu = X86_CPU(obj); 2264 CPUX86State *env = &cpu->env; 2265 const int64_t min = 0; 2266 const int64_t max = 0xff + 0xf; 2267 Error *local_err = NULL; 2268 int64_t value; 2269 2270 visit_type_int(v, name, &value, &local_err); 2271 if (local_err) { 2272 error_propagate(errp, local_err); 2273 return; 2274 } 2275 if (value < min || value > max) { 2276 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 2277 name ? name : "null", value, min, max); 2278 return; 2279 } 2280 2281 env->cpuid_version &= ~0xff00f00; 2282 if (value > 0x0f) { 2283 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 2284 } else { 2285 env->cpuid_version |= value << 8; 2286 } 2287 } 2288 2289 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 2290 const char *name, void *opaque, 2291 Error **errp) 2292 { 2293 X86CPU *cpu = X86_CPU(obj); 2294 CPUX86State *env = &cpu->env; 2295 int64_t value; 2296 2297 value = (env->cpuid_version >> 4) & 0xf; 2298 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 2299 visit_type_int(v, name, &value, errp); 2300 } 2301 2302 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 2303 const char *name, void *opaque, 2304 Error **errp) 2305 { 2306 X86CPU *cpu = X86_CPU(obj); 2307 CPUX86State *env = &cpu->env; 2308 const int64_t min = 0; 2309 const int64_t max = 0xff; 2310 Error *local_err = NULL; 2311 int64_t value; 2312 2313 visit_type_int(v, name, &value, &local_err); 2314 if (local_err) { 2315 error_propagate(errp, local_err); 2316 return; 2317 } 2318 if (value < min || value > max) { 2319 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 2320 name ? name : "null", value, min, max); 2321 return; 2322 } 2323 2324 env->cpuid_version &= ~0xf00f0; 2325 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 2326 } 2327 2328 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 2329 const char *name, void *opaque, 2330 Error **errp) 2331 { 2332 X86CPU *cpu = X86_CPU(obj); 2333 CPUX86State *env = &cpu->env; 2334 int64_t value; 2335 2336 value = env->cpuid_version & 0xf; 2337 visit_type_int(v, name, &value, errp); 2338 } 2339 2340 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 2341 const char *name, void *opaque, 2342 Error **errp) 2343 { 2344 X86CPU *cpu = X86_CPU(obj); 2345 CPUX86State *env = &cpu->env; 2346 const int64_t min = 0; 2347 const int64_t max = 0xf; 2348 Error *local_err = NULL; 2349 int64_t value; 2350 2351 visit_type_int(v, name, &value, &local_err); 2352 if (local_err) { 2353 error_propagate(errp, local_err); 2354 return; 2355 } 2356 if (value < min || value > max) { 2357 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 2358 name ? 
name : "null", value, min, max); 2359 return; 2360 } 2361 2362 env->cpuid_version &= ~0xf; 2363 env->cpuid_version |= value & 0xf; 2364 } 2365 2366 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 2367 { 2368 X86CPU *cpu = X86_CPU(obj); 2369 CPUX86State *env = &cpu->env; 2370 char *value; 2371 2372 value = g_malloc(CPUID_VENDOR_SZ + 1); 2373 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 2374 env->cpuid_vendor3); 2375 return value; 2376 } 2377 2378 static void x86_cpuid_set_vendor(Object *obj, const char *value, 2379 Error **errp) 2380 { 2381 X86CPU *cpu = X86_CPU(obj); 2382 CPUX86State *env = &cpu->env; 2383 int i; 2384 2385 if (strlen(value) != CPUID_VENDOR_SZ) { 2386 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 2387 return; 2388 } 2389 2390 env->cpuid_vendor1 = 0; 2391 env->cpuid_vendor2 = 0; 2392 env->cpuid_vendor3 = 0; 2393 for (i = 0; i < 4; i++) { 2394 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 2395 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 2396 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 2397 } 2398 } 2399 2400 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 2401 { 2402 X86CPU *cpu = X86_CPU(obj); 2403 CPUX86State *env = &cpu->env; 2404 char *value; 2405 int i; 2406 2407 value = g_malloc(48 + 1); 2408 for (i = 0; i < 48; i++) { 2409 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 2410 } 2411 value[48] = '\0'; 2412 return value; 2413 } 2414 2415 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 2416 Error **errp) 2417 { 2418 X86CPU *cpu = X86_CPU(obj); 2419 CPUX86State *env = &cpu->env; 2420 int c, len, i; 2421 2422 if (model_id == NULL) { 2423 model_id = ""; 2424 } 2425 len = strlen(model_id); 2426 memset(env->cpuid_model, 0, 48); 2427 for (i = 0; i < 48; i++) { 2428 if (i >= len) { 2429 c = '\0'; 2430 } else { 2431 c = (uint8_t)model_id[i]; 2432 } 2433 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 2434 } 2435 } 2436 2437 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 2438 void *opaque, Error **errp) 2439 { 2440 X86CPU *cpu = X86_CPU(obj); 2441 int64_t value; 2442 2443 value = cpu->env.tsc_khz * 1000; 2444 visit_type_int(v, name, &value, errp); 2445 } 2446 2447 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 2448 void *opaque, Error **errp) 2449 { 2450 X86CPU *cpu = X86_CPU(obj); 2451 const int64_t min = 0; 2452 const int64_t max = INT64_MAX; 2453 Error *local_err = NULL; 2454 int64_t value; 2455 2456 visit_type_int(v, name, &value, &local_err); 2457 if (local_err) { 2458 error_propagate(errp, local_err); 2459 return; 2460 } 2461 if (value < min || value > max) { 2462 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 2463 name ? 
name : "null", value, min, max); 2464 return; 2465 } 2466 2467 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 2468 } 2469 2470 /* Generic getter for "feature-words" and "filtered-features" properties */ 2471 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 2472 const char *name, void *opaque, 2473 Error **errp) 2474 { 2475 uint32_t *array = (uint32_t *)opaque; 2476 FeatureWord w; 2477 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 2478 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 2479 X86CPUFeatureWordInfoList *list = NULL; 2480 2481 for (w = 0; w < FEATURE_WORDS; w++) { 2482 FeatureWordInfo *wi = &feature_word_info[w]; 2483 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 2484 qwi->cpuid_input_eax = wi->cpuid_eax; 2485 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx; 2486 qwi->cpuid_input_ecx = wi->cpuid_ecx; 2487 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum; 2488 qwi->features = array[w]; 2489 2490 /* List will be in reverse order, but order shouldn't matter */ 2491 list_entries[w].next = list; 2492 list_entries[w].value = &word_infos[w]; 2493 list = &list_entries[w]; 2494 } 2495 2496 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 2497 } 2498 2499 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name, 2500 void *opaque, Error **errp) 2501 { 2502 X86CPU *cpu = X86_CPU(obj); 2503 int64_t value = cpu->hyperv_spinlock_attempts; 2504 2505 visit_type_int(v, name, &value, errp); 2506 } 2507 2508 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name, 2509 void *opaque, Error **errp) 2510 { 2511 const int64_t min = 0xFFF; 2512 const int64_t max = UINT_MAX; 2513 X86CPU *cpu = X86_CPU(obj); 2514 Error *err = NULL; 2515 int64_t value; 2516 2517 visit_type_int(v, name, &value, &err); 2518 if (err) { 2519 error_propagate(errp, err); 2520 return; 2521 } 2522 2523 if (value < min || value > max) { 2524 error_setg(errp, "Property %s.%s doesn't take value %" PRId64 2525 " (minimum: %" PRId64 ", maximum: %" PRId64 ")", 2526 object_get_typename(obj), name ? name : "null", 2527 value, min, max); 2528 return; 2529 } 2530 cpu->hyperv_spinlock_attempts = value; 2531 } 2532 2533 static const PropertyInfo qdev_prop_spinlocks = { 2534 .name = "int", 2535 .get = x86_get_hv_spinlocks, 2536 .set = x86_set_hv_spinlocks, 2537 }; 2538 2539 /* Convert all '_' in a feature string option name to '-', to make feature 2540 * name conform to QOM property naming rule, which uses '-' instead of '_'. 2541 */ 2542 static inline void feat2prop(char *s) 2543 { 2544 while ((s = strchr(s, '_'))) { 2545 *s = '-'; 2546 } 2547 } 2548 2549 /* Return the feature property name for a feature flag bit */ 2550 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 2551 { 2552 /* XSAVE components are automatically enabled by other features, 2553 * so return the original feature name instead 2554 */ 2555 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 2556 int comp = (w == FEAT_XSAVE_COMP_HI) ? 
bitnr + 32 : bitnr; 2557 2558 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 2559 x86_ext_save_areas[comp].bits) { 2560 w = x86_ext_save_areas[comp].feature; 2561 bitnr = ctz32(x86_ext_save_areas[comp].bits); 2562 } 2563 } 2564 2565 assert(bitnr < 32); 2566 assert(w < FEATURE_WORDS); 2567 return feature_word_info[w].feat_names[bitnr]; 2568 } 2569 2570 /* Compatibility hack to maintain legacy +-feat semantic, 2571 * where +-feat overwrites any feature set by 2572 * feat=on|feat even if the latter is parsed after +-feat 2573 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 2574 */ 2575 static GList *plus_features, *minus_features; 2576 2577 static gint compare_string(gconstpointer a, gconstpointer b) 2578 { 2579 return g_strcmp0(a, b); 2580 } 2581 2582 /* Parse "+feature,-feature,feature=foo" CPU feature string 2583 */ 2584 static void x86_cpu_parse_featurestr(const char *typename, char *features, 2585 Error **errp) 2586 { 2587 char *featurestr; /* Single 'key=value' string being parsed */ 2588 static bool cpu_globals_initialized; 2589 bool ambiguous = false; 2590 2591 if (cpu_globals_initialized) { 2592 return; 2593 } 2594 cpu_globals_initialized = true; 2595 2596 if (!features) { 2597 return; 2598 } 2599 2600 for (featurestr = strtok(features, ","); 2601 featurestr; 2602 featurestr = strtok(NULL, ",")) { 2603 const char *name; 2604 const char *val = NULL; 2605 char *eq = NULL; 2606 char num[32]; 2607 GlobalProperty *prop; 2608 2609 /* Compatibility syntax: */ 2610 if (featurestr[0] == '+') { 2611 plus_features = g_list_append(plus_features, 2612 g_strdup(featurestr + 1)); 2613 continue; 2614 } else if (featurestr[0] == '-') { 2615 minus_features = g_list_append(minus_features, 2616 g_strdup(featurestr + 1)); 2617 continue; 2618 } 2619 2620 eq = strchr(featurestr, '='); 2621 if (eq) { 2622 *eq++ = 0; 2623 val = eq; 2624 } else { 2625 val = "on"; 2626 } 2627 2628 feat2prop(featurestr); 2629 name = featurestr; 2630 2631 if (g_list_find_custom(plus_features, name, compare_string)) { 2632 warn_report("Ambiguous CPU model string. " 2633 "Don't mix both \"+%s\" and \"%s=%s\"", 2634 name, name, val); 2635 ambiguous = true; 2636 } 2637 if (g_list_find_custom(minus_features, name, compare_string)) { 2638 warn_report("Ambiguous CPU model string. " 2639 "Don't mix both \"-%s\" and \"%s=%s\"", 2640 name, name, val); 2641 ambiguous = true; 2642 } 2643 2644 /* Special case: */ 2645 if (!strcmp(name, "tsc-freq")) { 2646 int ret; 2647 uint64_t tsc_freq; 2648 2649 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 2650 if (ret < 0 || tsc_freq > INT64_MAX) { 2651 error_setg(errp, "bad numerical value %s", val); 2652 return; 2653 } 2654 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 2655 val = num; 2656 name = "tsc-frequency"; 2657 } 2658 2659 prop = g_new0(typeof(*prop), 1); 2660 prop->driver = typename; 2661 prop->property = g_strdup(name); 2662 prop->value = g_strdup(val); 2663 prop->errp = &error_fatal; 2664 qdev_prop_register_global(prop); 2665 } 2666 2667 if (ambiguous) { 2668 warn_report("Compatibility of ambiguous CPU model " 2669 "strings won't be kept in future QEMU versions"); 2670 } 2671 } 2672 2673 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 2674 static int x86_cpu_filter_features(X86CPU *cpu); 2675 2676 /* Check for missing features that may prevent the CPU class from 2677 * running using the current machine and accelerator.
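 * This is done by creating a temporary CPU object, expanding and filtering its features, and reporting every filtered-out flag by name.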
2678 */ 2679 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 2680 strList **missing_feats) 2681 { 2682 X86CPU *xc; 2683 FeatureWord w; 2684 Error *err = NULL; 2685 strList **next = missing_feats; 2686 2687 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 2688 strList *new = g_new0(strList, 1); 2689 new->value = g_strdup("kvm"); 2690 *missing_feats = new; 2691 return; 2692 } 2693 2694 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 2695 2696 x86_cpu_expand_features(xc, &err); 2697 if (err) { 2698 /* Errors at x86_cpu_expand_features should never happen, 2699 * but in case it does, just report the model as not 2700 * runnable at all using the "type" property. 2701 */ 2702 strList *new = g_new0(strList, 1); 2703 new->value = g_strdup("type"); 2704 *next = new; 2705 next = &new->next; 2706 } 2707 2708 x86_cpu_filter_features(xc); 2709 2710 for (w = 0; w < FEATURE_WORDS; w++) { 2711 uint32_t filtered = xc->filtered_features[w]; 2712 int i; 2713 for (i = 0; i < 32; i++) { 2714 if (filtered & (1UL << i)) { 2715 strList *new = g_new0(strList, 1); 2716 new->value = g_strdup(x86_cpu_feature_name(w, i)); 2717 *next = new; 2718 next = &new->next; 2719 } 2720 } 2721 } 2722 2723 object_unref(OBJECT(xc)); 2724 } 2725 2726 /* Print all cpuid feature names in featureset 2727 */ 2728 static void listflags(FILE *f, fprintf_function print, const char **featureset) 2729 { 2730 int bit; 2731 bool first = true; 2732 2733 for (bit = 0; bit < 32; bit++) { 2734 if (featureset[bit]) { 2735 print(f, "%s%s", first ? "" : " ", featureset[bit]); 2736 first = false; 2737 } 2738 } 2739 } 2740 2741 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 2742 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 2743 { 2744 ObjectClass *class_a = (ObjectClass *)a; 2745 ObjectClass *class_b = (ObjectClass *)b; 2746 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 2747 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 2748 const char *name_a, *name_b; 2749 2750 if (cc_a->ordering != cc_b->ordering) { 2751 return cc_a->ordering - cc_b->ordering; 2752 } else { 2753 name_a = object_class_get_name(class_a); 2754 name_b = object_class_get_name(class_b); 2755 return strcmp(name_a, name_b); 2756 } 2757 } 2758 2759 static GSList *get_sorted_cpu_model_list(void) 2760 { 2761 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 2762 list = g_slist_sort(list, x86_cpu_list_compare); 2763 return list; 2764 } 2765 2766 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 2767 { 2768 ObjectClass *oc = data; 2769 X86CPUClass *cc = X86_CPU_CLASS(oc); 2770 CPUListState *s = user_data; 2771 char *name = x86_cpu_class_get_model_name(cc); 2772 const char *desc = cc->model_description; 2773 if (!desc && cc->cpu_def) { 2774 desc = cc->cpu_def->model_id; 2775 } 2776 2777 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n", 2778 name, desc); 2779 g_free(name); 2780 } 2781 2782 /* list available CPU models and flags */ 2783 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf) 2784 { 2785 int i; 2786 CPUListState s = { 2787 .file = f, 2788 .cpu_fprintf = cpu_fprintf, 2789 }; 2790 GSList *list; 2791 2792 (*cpu_fprintf)(f, "Available CPUs:\n"); 2793 list = get_sorted_cpu_model_list(); 2794 g_slist_foreach(list, x86_cpu_list_entry, &s); 2795 g_slist_free(list); 2796 2797 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n"); 2798 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 2799 FeatureWordInfo *fw = &feature_word_info[i]; 2800 2801 (*cpu_fprintf)(f, " "); 2802 
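/* print all flag names of this feature word on a single indented line */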
listflags(f, cpu_fprintf, fw->feat_names); 2803 (*cpu_fprintf)(f, "\n"); 2804 } 2805 } 2806 2807 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 2808 { 2809 ObjectClass *oc = data; 2810 X86CPUClass *cc = X86_CPU_CLASS(oc); 2811 CpuDefinitionInfoList **cpu_list = user_data; 2812 CpuDefinitionInfoList *entry; 2813 CpuDefinitionInfo *info; 2814 2815 info = g_malloc0(sizeof(*info)); 2816 info->name = x86_cpu_class_get_model_name(cc); 2817 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 2818 info->has_unavailable_features = true; 2819 info->q_typename = g_strdup(object_class_get_name(oc)); 2820 info->migration_safe = cc->migration_safe; 2821 info->has_migration_safe = true; 2822 info->q_static = cc->static_model; 2823 2824 entry = g_malloc0(sizeof(*entry)); 2825 entry->value = info; 2826 entry->next = *cpu_list; 2827 *cpu_list = entry; 2828 } 2829 2830 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 2831 { 2832 CpuDefinitionInfoList *cpu_list = NULL; 2833 GSList *list = get_sorted_cpu_model_list(); 2834 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 2835 g_slist_free(list); 2836 return cpu_list; 2837 } 2838 2839 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 2840 bool migratable_only) 2841 { 2842 FeatureWordInfo *wi = &feature_word_info[w]; 2843 uint32_t r; 2844 2845 if (kvm_enabled()) { 2846 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax, 2847 wi->cpuid_ecx, 2848 wi->cpuid_reg); 2849 } else if (hvf_enabled()) { 2850 r = hvf_get_supported_cpuid(wi->cpuid_eax, 2851 wi->cpuid_ecx, 2852 wi->cpuid_reg); 2853 } else if (tcg_enabled()) { 2854 r = wi->tcg_features; 2855 } else { 2856 return ~0; 2857 } 2858 if (migratable_only) { 2859 r &= x86_cpu_get_migratable_flags(w); 2860 } 2861 return r; 2862 } 2863 2864 static void x86_cpu_report_filtered_features(X86CPU *cpu) 2865 { 2866 FeatureWord w; 2867 2868 for (w = 0; w < FEATURE_WORDS; w++) { 2869 report_unavailable_features(w, cpu->filtered_features[w]); 2870 } 2871 } 2872 2873 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 2874 { 2875 PropValue *pv; 2876 for (pv = props; pv->prop; pv++) { 2877 if (!pv->value) { 2878 continue; 2879 } 2880 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 2881 &error_abort); 2882 } 2883 } 2884 2885 /* Load data from X86CPUDefinition into a X86CPU object 2886 */ 2887 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp) 2888 { 2889 CPUX86State *env = &cpu->env; 2890 const char *vendor; 2891 char host_vendor[CPUID_VENDOR_SZ + 1]; 2892 FeatureWord w; 2893 2894 /*NOTE: any property set by this function should be returned by 2895 * x86_cpu_static_props(), so static expansion of 2896 * query-cpu-model-expansion is always complete. 
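 * (A property set here but missing from that list would silently disappear from the static expansion output.)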
2897 */ 2898 2899 /* CPU models only set _minimum_ values for level/xlevel: */ 2900 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp); 2901 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp); 2902 2903 object_property_set_int(OBJECT(cpu), def->family, "family", errp); 2904 object_property_set_int(OBJECT(cpu), def->model, "model", errp); 2905 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); 2906 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); 2907 for (w = 0; w < FEATURE_WORDS; w++) { 2908 env->features[w] = def->features[w]; 2909 } 2910 2911 /* Special cases not set in the X86CPUDefinition structs: */ 2912 /* TODO: in-kernel irqchip for hvf */ 2913 if (kvm_enabled()) { 2914 if (!kvm_irqchip_in_kernel()) { 2915 x86_cpu_change_kvm_default("x2apic", "off"); 2916 } 2917 2918 x86_cpu_apply_props(cpu, kvm_default_props); 2919 } else if (tcg_enabled()) { 2920 x86_cpu_apply_props(cpu, tcg_default_props); 2921 } 2922 2923 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 2924 2925 /* sysenter isn't supported in compatibility mode on AMD, 2926 * syscall isn't supported in compatibility mode on Intel. 2927 * Normally we advertise the actual CPU vendor, but you can 2928 * override this using the 'vendor' property if you want to use 2929 * KVM's sysenter/syscall emulation in compatibility mode and 2930 * when doing cross vendor migration 2931 */ 2932 vendor = def->vendor; 2933 if (accel_uses_host_cpuid()) { 2934 uint32_t ebx = 0, ecx = 0, edx = 0; 2935 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 2936 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 2937 vendor = host_vendor; 2938 } 2939 2940 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp); 2941 2942 } 2943 2944 /* Return a QDict containing keys for all properties that can be included 2945 * in static expansion of CPU models. All properties set by x86_cpu_load_def() 2946 * must be included in the dictionary. 2947 */ 2948 static QDict *x86_cpu_static_props(void) 2949 { 2950 FeatureWord w; 2951 int i; 2952 static const char *props[] = { 2953 "min-level", 2954 "min-xlevel", 2955 "family", 2956 "model", 2957 "stepping", 2958 "model-id", 2959 "vendor", 2960 "lmce", 2961 NULL, 2962 }; 2963 static QDict *d; 2964 2965 if (d) { 2966 return d; 2967 } 2968 2969 d = qdict_new(); 2970 for (i = 0; props[i]; i++) { 2971 qdict_put_null(d, props[i]); 2972 } 2973 2974 for (w = 0; w < FEATURE_WORDS; w++) { 2975 FeatureWordInfo *fi = &feature_word_info[w]; 2976 int bit; 2977 for (bit = 0; bit < 32; bit++) { 2978 if (!fi->feat_names[bit]) { 2979 continue; 2980 } 2981 qdict_put_null(d, fi->feat_names[bit]); 2982 } 2983 } 2984 2985 return d; 2986 } 2987 2988 /* Add an entry to @props dict, with the value for property. */ 2989 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 2990 { 2991 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 2992 &error_abort); 2993 2994 qdict_put_obj(props, prop, value); 2995 } 2996 2997 /* Convert CPU model data from X86CPU object to a property dictionary 2998 * that can recreate exactly the same CPU model. 
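 * Only the properties listed by x86_cpu_static_props() are visited.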
2999 */ 3000 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 3001 { 3002 QDict *sprops = x86_cpu_static_props(); 3003 const QDictEntry *e; 3004 3005 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 3006 const char *prop = qdict_entry_key(e); 3007 x86_cpu_expand_prop(cpu, props, prop); 3008 } 3009 } 3010 3011 /* Convert CPU model data from X86CPU object to a property dictionary 3012 * that can recreate exactly the same CPU model, including every 3013 * writeable QOM property. 3014 */ 3015 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 3016 { 3017 ObjectPropertyIterator iter; 3018 ObjectProperty *prop; 3019 3020 object_property_iter_init(&iter, OBJECT(cpu)); 3021 while ((prop = object_property_iter_next(&iter))) { 3022 /* skip read-only or write-only properties */ 3023 if (!prop->get || !prop->set) { 3024 continue; 3025 } 3026 3027 /* "hotplugged" is the only property that is configurable 3028 * on the command-line but will be set differently on CPUs 3029 * created using "-cpu ... -smp ..." and by CPUs created 3030 * on the fly by x86_cpu_from_model() for querying. Skip it. 3031 */ 3032 if (!strcmp(prop->name, "hotplugged")) { 3033 continue; 3034 } 3035 x86_cpu_expand_prop(cpu, props, prop->name); 3036 } 3037 } 3038 3039 static void object_apply_props(Object *obj, QDict *props, Error **errp) 3040 { 3041 const QDictEntry *prop; 3042 Error *err = NULL; 3043 3044 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 3045 object_property_set_qobject(obj, qdict_entry_value(prop), 3046 qdict_entry_key(prop), &err); 3047 if (err) { 3048 break; 3049 } 3050 } 3051 3052 error_propagate(errp, err); 3053 } 3054 3055 /* Create X86CPU object according to model+props specification */ 3056 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 3057 { 3058 X86CPU *xc = NULL; 3059 X86CPUClass *xcc; 3060 Error *err = NULL; 3061 3062 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 3063 if (xcc == NULL) { 3064 error_setg(&err, "CPU model '%s' not found", model); 3065 goto out; 3066 } 3067 3068 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3069 if (props) { 3070 object_apply_props(OBJECT(xc), props, &err); 3071 if (err) { 3072 goto out; 3073 } 3074 } 3075 3076 x86_cpu_expand_features(xc, &err); 3077 if (err) { 3078 goto out; 3079 } 3080 3081 out: 3082 if (err) { 3083 error_propagate(errp, err); 3084 object_unref(OBJECT(xc)); 3085 xc = NULL; 3086 } 3087 return xc; 3088 } 3089 3090 CpuModelExpansionInfo * 3091 arch_query_cpu_model_expansion(CpuModelExpansionType type, 3092 CpuModelInfo *model, 3093 Error **errp) 3094 { 3095 X86CPU *xc = NULL; 3096 Error *err = NULL; 3097 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 3098 QDict *props = NULL; 3099 const char *base_name; 3100 3101 xc = x86_cpu_from_model(model->name, 3102 model->has_props ? 3103 qobject_to_qdict(model->props) : 3104 NULL, &err); 3105 if (err) { 3106 goto out; 3107 } 3108 3109 props = qdict_new(); 3110 3111 switch (type) { 3112 case CPU_MODEL_EXPANSION_TYPE_STATIC: 3113 /* Static expansion will be based on "base" only */ 3114 base_name = "base"; 3115 x86_cpu_to_dict(xc, props); 3116 break; 3117 case CPU_MODEL_EXPANSION_TYPE_FULL: 3118 /* As we don't return every single property, full expansion needs 3119 * to keep the original model name+props, and add extra 3120 * properties on top of that. 
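 * x86_cpu_to_dict_full() below collects every QOM property that is both readable and writable.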
3121 */ 3122 base_name = model->name; 3123 x86_cpu_to_dict_full(xc, props); 3124 break; 3125 default: 3126 error_setg(&err, "Unsupported expansion type"); 3127 goto out; 3128 } 3129 3130 if (!props) { 3131 props = qdict_new(); 3132 } 3133 x86_cpu_to_dict(xc, props); 3134 3135 ret->model = g_new0(CpuModelInfo, 1); 3136 ret->model->name = g_strdup(base_name); 3137 ret->model->props = QOBJECT(props); 3138 ret->model->has_props = true; 3139 3140 out: 3141 object_unref(OBJECT(xc)); 3142 if (err) { 3143 error_propagate(errp, err); 3144 qapi_free_CpuModelExpansionInfo(ret); 3145 ret = NULL; 3146 } 3147 return ret; 3148 } 3149 3150 static gchar *x86_gdb_arch_name(CPUState *cs) 3151 { 3152 #ifdef TARGET_X86_64 3153 return g_strdup("i386:x86-64"); 3154 #else 3155 return g_strdup("i386"); 3156 #endif 3157 } 3158 3159 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 3160 { 3161 X86CPUDefinition *cpudef = data; 3162 X86CPUClass *xcc = X86_CPU_CLASS(oc); 3163 3164 xcc->cpu_def = cpudef; 3165 xcc->migration_safe = true; 3166 } 3167 3168 static void x86_register_cpudef_type(X86CPUDefinition *def) 3169 { 3170 char *typename = x86_cpu_type_name(def->name); 3171 TypeInfo ti = { 3172 .name = typename, 3173 .parent = TYPE_X86_CPU, 3174 .class_init = x86_cpu_cpudef_class_init, 3175 .class_data = def, 3176 }; 3177 3178 /* AMD aliases are handled at runtime based on CPUID vendor, so 3179 * they shouldn't be set in the CPU model table. 3180 */ 3181 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 3182 /* catch mistakes instead of silently truncating model_id when too long */ 3183 assert(def->model_id && strlen(def->model_id) <= 48); 3184 3185 3186 type_register(&ti); 3187 g_free(typename); 3188 } 3189 3190 #if !defined(CONFIG_USER_ONLY) 3191 3192 void cpu_clear_apic_feature(CPUX86State *env) 3193 { 3194 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 3195 } 3196 3197 #endif /* !CONFIG_USER_ONLY */ 3198 3199 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 3200 uint32_t *eax, uint32_t *ebx, 3201 uint32_t *ecx, uint32_t *edx) 3202 { 3203 X86CPU *cpu = x86_env_get_cpu(env); 3204 CPUState *cs = CPU(cpu); 3205 uint32_t pkg_offset; 3206 uint32_t limit; 3207 uint32_t signature[3]; 3208 3209 /* Calculate & apply limits for different index ranges */ 3210 if (index >= 0xC0000000) { 3211 limit = env->cpuid_xlevel2; 3212 } else if (index >= 0x80000000) { 3213 limit = env->cpuid_xlevel; 3214 } else if (index >= 0x40000000) { 3215 limit = 0x40000001; 3216 } else { 3217 limit = env->cpuid_level; 3218 } 3219 3220 if (index > limit) { 3221 /* Intel documentation states that invalid EAX input will 3222 * return the same information as EAX=cpuid_level 3223 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 3224 */ 3225 index = env->cpuid_level; 3226 } 3227 3228 switch(index) { 3229 case 0: 3230 *eax = env->cpuid_level; 3231 *ebx = env->cpuid_vendor1; 3232 *edx = env->cpuid_vendor2; 3233 *ecx = env->cpuid_vendor3; 3234 break; 3235 case 1: 3236 *eax = env->cpuid_version; 3237 *ebx = (cpu->apic_id << 24) | 3238 8 << 8; /* CLFLUSH size in quad words, Linux wants it.
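 * (8 quadwords of 8 bytes each = a 64-byte cache line).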
*/ 3239 *ecx = env->features[FEAT_1_ECX]; 3240 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 3241 *ecx |= CPUID_EXT_OSXSAVE; 3242 } 3243 *edx = env->features[FEAT_1_EDX]; 3244 if (cs->nr_cores * cs->nr_threads > 1) { 3245 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 3246 *edx |= CPUID_HT; 3247 } 3248 break; 3249 case 2: 3250 /* cache info: needed for Pentium Pro compatibility */ 3251 if (cpu->cache_info_passthrough) { 3252 host_cpuid(index, 0, eax, ebx, ecx, edx); 3253 break; 3254 } 3255 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 3256 *ebx = 0; 3257 if (!cpu->enable_l3_cache) { 3258 *ecx = 0; 3259 } else { 3260 *ecx = L3_N_DESCRIPTOR; 3261 } 3262 *edx = (L1D_DESCRIPTOR << 16) | \ 3263 (L1I_DESCRIPTOR << 8) | \ 3264 (L2_DESCRIPTOR); 3265 break; 3266 case 4: 3267 /* cache info: needed for Core compatibility */ 3268 if (cpu->cache_info_passthrough) { 3269 host_cpuid(index, count, eax, ebx, ecx, edx); 3270 *eax &= ~0xFC000000; 3271 } else { 3272 *eax = 0; 3273 switch (count) { 3274 case 0: /* L1 dcache info */ 3275 *eax |= CPUID_4_TYPE_DCACHE | \ 3276 CPUID_4_LEVEL(1) | \ 3277 CPUID_4_SELF_INIT_LEVEL; 3278 *ebx = (L1D_LINE_SIZE - 1) | \ 3279 ((L1D_PARTITIONS - 1) << 12) | \ 3280 ((L1D_ASSOCIATIVITY - 1) << 22); 3281 *ecx = L1D_SETS - 1; 3282 *edx = CPUID_4_NO_INVD_SHARING; 3283 break; 3284 case 1: /* L1 icache info */ 3285 *eax |= CPUID_4_TYPE_ICACHE | \ 3286 CPUID_4_LEVEL(1) | \ 3287 CPUID_4_SELF_INIT_LEVEL; 3288 *ebx = (L1I_LINE_SIZE - 1) | \ 3289 ((L1I_PARTITIONS - 1) << 12) | \ 3290 ((L1I_ASSOCIATIVITY - 1) << 22); 3291 *ecx = L1I_SETS - 1; 3292 *edx = CPUID_4_NO_INVD_SHARING; 3293 break; 3294 case 2: /* L2 cache info */ 3295 *eax |= CPUID_4_TYPE_UNIFIED | \ 3296 CPUID_4_LEVEL(2) | \ 3297 CPUID_4_SELF_INIT_LEVEL; 3298 if (cs->nr_threads > 1) { 3299 *eax |= (cs->nr_threads - 1) << 14; 3300 } 3301 *ebx = (L2_LINE_SIZE - 1) | \ 3302 ((L2_PARTITIONS - 1) << 12) | \ 3303 ((L2_ASSOCIATIVITY - 1) << 22); 3304 *ecx = L2_SETS - 1; 3305 *edx = CPUID_4_NO_INVD_SHARING; 3306 break; 3307 case 3: /* L3 cache info */ 3308 if (!cpu->enable_l3_cache) { 3309 *eax = 0; 3310 *ebx = 0; 3311 *ecx = 0; 3312 *edx = 0; 3313 break; 3314 } 3315 *eax |= CPUID_4_TYPE_UNIFIED | \ 3316 CPUID_4_LEVEL(3) | \ 3317 CPUID_4_SELF_INIT_LEVEL; 3318 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); 3319 *eax |= ((1 << pkg_offset) - 1) << 14; 3320 *ebx = (L3_N_LINE_SIZE - 1) | \ 3321 ((L3_N_PARTITIONS - 1) << 12) | \ 3322 ((L3_N_ASSOCIATIVITY - 1) << 22); 3323 *ecx = L3_N_SETS - 1; 3324 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX; 3325 break; 3326 default: /* end of info */ 3327 *eax = 0; 3328 *ebx = 0; 3329 *ecx = 0; 3330 *edx = 0; 3331 break; 3332 } 3333 } 3334 3335 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
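 * CPUID.04H:EAX[31:26] is the maximum number of addressable core IDs per package minus one, so it is recomputed from nr_cores below.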
*/ 3336 if ((*eax & 31) && cs->nr_cores > 1) { 3337 *eax |= (cs->nr_cores - 1) << 26; 3338 } 3339 break; 3340 case 5: 3341 /* mwait info: needed for Core compatibility */ 3342 *eax = 0; /* Smallest monitor-line size in bytes */ 3343 *ebx = 0; /* Largest monitor-line size in bytes */ 3344 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 3345 *edx = 0; 3346 break; 3347 case 6: 3348 /* Thermal and Power Leaf */ 3349 *eax = env->features[FEAT_6_EAX]; 3350 *ebx = 0; 3351 *ecx = 0; 3352 *edx = 0; 3353 break; 3354 case 7: 3355 /* Structured Extended Feature Flags Enumeration Leaf */ 3356 if (count == 0) { 3357 *eax = 0; /* Maximum ECX value for sub-leaves */ 3358 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 3359 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 3360 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 3361 *ecx |= CPUID_7_0_ECX_OSPKE; 3362 } 3363 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 3364 } else { 3365 *eax = 0; 3366 *ebx = 0; 3367 *ecx = 0; 3368 *edx = 0; 3369 } 3370 break; 3371 case 9: 3372 /* Direct Cache Access Information Leaf */ 3373 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 3374 *ebx = 0; 3375 *ecx = 0; 3376 *edx = 0; 3377 break; 3378 case 0xA: 3379 /* Architectural Performance Monitoring Leaf */ 3380 if (kvm_enabled() && cpu->enable_pmu) { 3381 KVMState *s = cs->kvm_state; 3382 3383 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 3384 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 3385 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 3386 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 3387 } else if (hvf_enabled() && cpu->enable_pmu) { 3388 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 3389 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 3390 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 3391 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 3392 } else { 3393 *eax = 0; 3394 *ebx = 0; 3395 *ecx = 0; 3396 *edx = 0; 3397 } 3398 break; 3399 case 0xB: 3400 /* Extended Topology Enumeration Leaf */ 3401 if (!cpu->enable_cpuid_0xb) { 3402 *eax = *ebx = *ecx = *edx = 0; 3403 break; 3404 } 3405 3406 *ecx = count & 0xff; 3407 *edx = cpu->apic_id; 3408 3409 switch (count) { 3410 case 0: 3411 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads); 3412 *ebx = cs->nr_threads; 3413 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 3414 break; 3415 case 1: 3416 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); 3417 *ebx = cs->nr_cores * cs->nr_threads; 3418 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 3419 break; 3420 default: 3421 *eax = 0; 3422 *ebx = 0; 3423 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 3424 } 3425 3426 assert(!(*eax & ~0x1f)); 3427 *ebx &= 0xffff; /* The count doesn't need to be reliable. 
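 * Only the shift width in EAX is architecturally meaningful; the SDM tells software not to rely on this EBX count.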
*/ 3428 break; 3429 case 0xD: { 3430 /* Processor Extended State */ 3431 *eax = 0; 3432 *ebx = 0; 3433 *ecx = 0; 3434 *edx = 0; 3435 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 3436 break; 3437 } 3438 3439 if (count == 0) { 3440 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 3441 *eax = env->features[FEAT_XSAVE_COMP_LO]; 3442 *edx = env->features[FEAT_XSAVE_COMP_HI]; 3443 *ebx = *ecx; 3444 } else if (count == 1) { 3445 *eax = env->features[FEAT_XSAVE]; 3446 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 3447 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 3448 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 3449 *eax = esa->size; 3450 *ebx = esa->offset; 3451 } 3452 } 3453 break; 3454 } 3455 case 0x40000000: 3456 /* 3457 * CPUID code in kvm_arch_init_vcpu() ignores stuff 3458 * set here, but we restrict to TCG none the less. 3459 */ 3460 if (tcg_enabled() && cpu->expose_tcg) { 3461 memcpy(signature, "TCGTCGTCGTCG", 12); 3462 *eax = 0x40000001; 3463 *ebx = signature[0]; 3464 *ecx = signature[1]; 3465 *edx = signature[2]; 3466 } else { 3467 *eax = 0; 3468 *ebx = 0; 3469 *ecx = 0; 3470 *edx = 0; 3471 } 3472 break; 3473 case 0x40000001: 3474 *eax = 0; 3475 *ebx = 0; 3476 *ecx = 0; 3477 *edx = 0; 3478 break; 3479 case 0x80000000: 3480 *eax = env->cpuid_xlevel; 3481 *ebx = env->cpuid_vendor1; 3482 *edx = env->cpuid_vendor2; 3483 *ecx = env->cpuid_vendor3; 3484 break; 3485 case 0x80000001: 3486 *eax = env->cpuid_version; 3487 *ebx = 0; 3488 *ecx = env->features[FEAT_8000_0001_ECX]; 3489 *edx = env->features[FEAT_8000_0001_EDX]; 3490 3491 /* The Linux kernel checks for the CMPLegacy bit and 3492 * discards multiple thread information if it is set. 3493 * So don't set it here for Intel to make Linux guests happy. 3494 */ 3495 if (cs->nr_cores * cs->nr_threads > 1) { 3496 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 3497 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 3498 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 3499 *ecx |= 1 << 1; /* CmpLegacy bit */ 3500 } 3501 } 3502 break; 3503 case 0x80000002: 3504 case 0x80000003: 3505 case 0x80000004: 3506 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 3507 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 3508 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 3509 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 3510 break; 3511 case 0x80000005: 3512 /* cache info (L1 cache) */ 3513 if (cpu->cache_info_passthrough) { 3514 host_cpuid(index, 0, eax, ebx, ecx, edx); 3515 break; 3516 } 3517 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ 3518 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 3519 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ 3520 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 3521 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \ 3522 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE); 3523 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \ 3524 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE); 3525 break; 3526 case 0x80000006: 3527 /* cache info (L2 cache) */ 3528 if (cpu->cache_info_passthrough) { 3529 host_cpuid(index, 0, eax, ebx, ecx, edx); 3530 break; 3531 } 3532 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ 3533 (L2_DTLB_2M_ENTRIES << 16) | \ 3534 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ 3535 (L2_ITLB_2M_ENTRIES); 3536 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ 3537 (L2_DTLB_4K_ENTRIES << 16) | \ 3538 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ 3539 (L2_ITLB_4K_ENTRIES); 3540 *ecx = (L2_SIZE_KB_AMD << 
16) | \ 3541 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \ 3542 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE); 3543 if (!cpu->enable_l3_cache) { 3544 *edx = ((L3_SIZE_KB / 512) << 18) | \ 3545 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \ 3546 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE); 3547 } else { 3548 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \ 3549 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \ 3550 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE); 3551 } 3552 break; 3553 case 0x80000007: 3554 *eax = 0; 3555 *ebx = 0; 3556 *ecx = 0; 3557 *edx = env->features[FEAT_8000_0007_EDX]; 3558 break; 3559 case 0x80000008: 3560 /* virtual & phys address size in low 2 bytes. */ 3561 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 3562 /* 64 bit processor */ 3563 *eax = cpu->phys_bits; /* configurable physical bits */ 3564 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 3565 *eax |= 0x00003900; /* 57 bits virtual */ 3566 } else { 3567 *eax |= 0x00003000; /* 48 bits virtual */ 3568 } 3569 } else { 3570 *eax = cpu->phys_bits; 3571 } 3572 *ebx = env->features[FEAT_8000_0008_EBX]; 3573 *ecx = 0; 3574 *edx = 0; 3575 if (cs->nr_cores * cs->nr_threads > 1) { 3576 *ecx |= (cs->nr_cores * cs->nr_threads) - 1; 3577 } 3578 break; 3579 case 0x8000000A: 3580 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 3581 *eax = 0x00000001; /* SVM Revision */ 3582 *ebx = 0x00000010; /* nr of ASIDs */ 3583 *ecx = 0; 3584 *edx = env->features[FEAT_SVM]; /* optional features */ 3585 } else { 3586 *eax = 0; 3587 *ebx = 0; 3588 *ecx = 0; 3589 *edx = 0; 3590 } 3591 break; 3592 case 0xC0000000: 3593 *eax = env->cpuid_xlevel2; 3594 *ebx = 0; 3595 *ecx = 0; 3596 *edx = 0; 3597 break; 3598 case 0xC0000001: 3599 /* Support for VIA CPU's CPUID instruction */ 3600 *eax = env->cpuid_version; 3601 *ebx = 0; 3602 *ecx = 0; 3603 *edx = env->features[FEAT_C000_0001_EDX]; 3604 break; 3605 case 0xC0000002: 3606 case 0xC0000003: 3607 case 0xC0000004: 3608 /* Reserved for the future, and now filled with zero */ 3609 *eax = 0; 3610 *ebx = 0; 3611 *ecx = 0; 3612 *edx = 0; 3613 break; 3614 default: 3615 /* reserved values: zero */ 3616 *eax = 0; 3617 *ebx = 0; 3618 *ecx = 0; 3619 *edx = 0; 3620 break; 3621 } 3622 } 3623 3624 /* CPUClass::reset() */ 3625 static void x86_cpu_reset(CPUState *s) 3626 { 3627 X86CPU *cpu = X86_CPU(s); 3628 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 3629 CPUX86State *env = &cpu->env; 3630 target_ulong cr4; 3631 uint64_t xcr0; 3632 int i; 3633 3634 xcc->parent_reset(s); 3635 3636 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 3637 3638 env->old_exception = -1; 3639 3640 /* init to reset state */ 3641 3642 env->hflags2 |= HF2_GIF_MASK; 3643 3644 cpu_x86_update_cr0(env, 0x60000010); 3645 env->a20_mask = ~0x0; 3646 env->smbase = 0x30000; 3647 3648 env->idt.limit = 0xffff; 3649 env->gdt.limit = 0xffff; 3650 env->ldt.limit = 0xffff; 3651 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 3652 env->tr.limit = 0xffff; 3653 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 3654 3655 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 3656 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 3657 DESC_R_MASK | DESC_A_MASK); 3658 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 3659 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3660 DESC_A_MASK); 3661 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 3662 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3663 DESC_A_MASK); 3664 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 3665 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3666 DESC_A_MASK); 3667 
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 3668 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3669 DESC_A_MASK); 3670 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 3671 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3672 DESC_A_MASK); 3673 3674 env->eip = 0xfff0; 3675 env->regs[R_EDX] = env->cpuid_version; 3676 3677 env->eflags = 0x2; 3678 3679 /* FPU init */ 3680 for (i = 0; i < 8; i++) { 3681 env->fptags[i] = 1; 3682 } 3683 cpu_set_fpuc(env, 0x37f); 3684 3685 env->mxcsr = 0x1f80; 3686 /* All units are in INIT state. */ 3687 env->xstate_bv = 0; 3688 3689 env->pat = 0x0007040600070406ULL; 3690 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 3691 3692 memset(env->dr, 0, sizeof(env->dr)); 3693 env->dr[6] = DR6_FIXED_1; 3694 env->dr[7] = DR7_FIXED_1; 3695 cpu_breakpoint_remove_all(s, BP_CPU); 3696 cpu_watchpoint_remove_all(s, BP_CPU); 3697 3698 cr4 = 0; 3699 xcr0 = XSTATE_FP_MASK; 3700 3701 #ifdef CONFIG_USER_ONLY 3702 /* Enable all the features for user-mode. */ 3703 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 3704 xcr0 |= XSTATE_SSE_MASK; 3705 } 3706 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 3707 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 3708 if (env->features[esa->feature] & esa->bits) { 3709 xcr0 |= 1ull << i; 3710 } 3711 } 3712 3713 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 3714 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 3715 } 3716 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 3717 cr4 |= CR4_FSGSBASE_MASK; 3718 } 3719 #endif 3720 3721 env->xcr0 = xcr0; 3722 cpu_x86_update_cr4(env, cr4); 3723 3724 /* 3725 * SDM 11.11.5 requires: 3726 * - IA32_MTRR_DEF_TYPE MSR.E = 0 3727 * - IA32_MTRR_PHYSMASKn.V = 0 3728 * All other bits are undefined. For simplification, zero it all. 3729 */ 3730 env->mtrr_deftype = 0; 3731 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 3732 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 3733 3734 env->interrupt_injected = -1; 3735 env->exception_injected = -1; 3736 env->nmi_injected = false; 3737 #if !defined(CONFIG_USER_ONLY) 3738 /* We hard-wire the BSP to the first CPU. */ 3739 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 3740 3741 s->halted = !cpu_is_bsp(cpu); 3742 3743 if (kvm_enabled()) { 3744 kvm_arch_reset_vcpu(cpu); 3745 } 3746 else if (hvf_enabled()) { 3747 hvf_reset_vcpu(s); 3748 } 3749 #endif 3750 } 3751 3752 #ifndef CONFIG_USER_ONLY 3753 bool cpu_is_bsp(X86CPU *cpu) 3754 { 3755 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 3756 } 3757 3758 /* TODO: remove me, when reset over QOM tree is implemented */ 3759 static void x86_cpu_machine_reset_cb(void *opaque) 3760 { 3761 X86CPU *cpu = opaque; 3762 cpu_reset(CPU(cpu)); 3763 } 3764 #endif 3765 3766 static void mce_init(X86CPU *cpu) 3767 { 3768 CPUX86State *cenv = &cpu->env; 3769 unsigned int bank; 3770 3771 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 3772 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 3773 (CPUID_MCE | CPUID_MCA)) { 3774 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 3775 (cpu->enable_lmce ? 
MCG_LMCE_P : 0); 3776 cenv->mcg_ctl = ~(uint64_t)0; 3777 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 3778 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 3779 } 3780 } 3781 } 3782 3783 #ifndef CONFIG_USER_ONLY 3784 APICCommonClass *apic_get_class(void) 3785 { 3786 const char *apic_type = "apic"; 3787 3788 /* TODO: in-kernel irqchip for hvf */ 3789 if (kvm_apic_in_kernel()) { 3790 apic_type = "kvm-apic"; 3791 } else if (xen_enabled()) { 3792 apic_type = "xen-apic"; 3793 } 3794 3795 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 3796 } 3797 3798 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 3799 { 3800 APICCommonState *apic; 3801 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 3802 3803 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class))); 3804 3805 object_property_add_child(OBJECT(cpu), "lapic", 3806 OBJECT(cpu->apic_state), &error_abort); 3807 object_unref(OBJECT(cpu->apic_state)); 3808 3809 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 3810 /* TODO: convert to link<> */ 3811 apic = APIC_COMMON(cpu->apic_state); 3812 apic->cpu = cpu; 3813 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 3814 } 3815 3816 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 3817 { 3818 APICCommonState *apic; 3819 static bool apic_mmio_map_once; 3820 3821 if (cpu->apic_state == NULL) { 3822 return; 3823 } 3824 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized", 3825 errp); 3826 3827 /* Map APIC MMIO area */ 3828 apic = APIC_COMMON(cpu->apic_state); 3829 if (!apic_mmio_map_once) { 3830 memory_region_add_subregion_overlap(get_system_memory(), 3831 apic->apicbase & 3832 MSR_IA32_APICBASE_BASE, 3833 &apic->io_memory, 3834 0x1000); 3835 apic_mmio_map_once = true; 3836 } 3837 } 3838 3839 static void x86_cpu_machine_done(Notifier *n, void *unused) 3840 { 3841 X86CPU *cpu = container_of(n, X86CPU, machine_done); 3842 MemoryRegion *smram = 3843 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 3844 3845 if (smram) { 3846 cpu->smram = g_new(MemoryRegion, 1); 3847 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 3848 smram, 0, 1ull << 32); 3849 memory_region_set_enabled(cpu->smram, true); 3850 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 3851 } 3852 } 3853 #else 3854 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 3855 { 3856 } 3857 #endif 3858 3859 /* Note: Only safe for use on x86(-64) hosts */ 3860 static uint32_t x86_host_phys_bits(void) 3861 { 3862 uint32_t eax; 3863 uint32_t host_phys_bits; 3864 3865 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 3866 if (eax >= 0x80000008) { 3867 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 3868 /* Note: According to AMD doc 25481 rev 2.34 they have a field 3869 * at 23:16 that can specify a maximum physical address bits for 3870 * the guest that can override this value; but I've not seen 3871 * anything with that set. 3872 */ 3873 host_phys_bits = eax & 0xff; 3874 } else { 3875 /* It's an odd 64 bit machine that doesn't have the leaf for 3876 * physical address bits; fall back to 36 that's most older 3877 * Intel. 
 */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;

    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved in loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may not be available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
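 *
 * For example (illustrative only): with "-cpu max", cpu->max_features is set
 * and each feature word below is filled in from
 * x86_cpu_get_supported_feature_word(), except for bits the user already set
 * explicitly (tracked in env->user_features and left untouched); a legacy
 * "-cpu qemu64,-avx" option instead lands in minus_features and is applied
 * through the QOM property setters.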
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now that cpu->max_features no longer overwrites features
     * set using QOM properties, plus_features & minus_features can
     * be converted to global properties inside
     * x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w];
        }
    }

    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
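 *
 * For example (illustrative, hypothetical host): if the configured model
 * requests avx512f but the accelerator does not report it in
 * x86_cpu_get_supported_feature_word(), the bit is cleared from
 * env->features[FEAT_7_0_EBX] and recorded in
 * cpu->filtered_features[FEAT_7_0_EBX]; with "enforce" set, realization
 * then fails, while "check" only reports the filtered bits.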
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    return rv;
}

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64-bit systems, think about the number of physical bits to present.
     * Ideally this should be the same as the host; anything other than
     * matching the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits, which corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
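             * (For example, a host reporting 39 physical bits while the user
             * passed phys-bits=40, hypothetical values, triggers only this
             * warning; the value is still accepted as long as it stays in the
             * 32..TARGET_PHYS_ADDR_SPACE_BITS range checked below.)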
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32-bit systems, don't use the user-set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the check is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading.
Please configure" 4263 " -smp options properly."); 4264 ht_warned = true; 4265 } 4266 4267 x86_cpu_apic_realize(cpu, &local_err); 4268 if (local_err != NULL) { 4269 goto out; 4270 } 4271 cpu_reset(cs); 4272 4273 xcc->parent_realize(dev, &local_err); 4274 4275 out: 4276 if (local_err != NULL) { 4277 error_propagate(errp, local_err); 4278 return; 4279 } 4280 } 4281 4282 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp) 4283 { 4284 X86CPU *cpu = X86_CPU(dev); 4285 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 4286 Error *local_err = NULL; 4287 4288 #ifndef CONFIG_USER_ONLY 4289 cpu_remove_sync(CPU(dev)); 4290 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 4291 #endif 4292 4293 if (cpu->apic_state) { 4294 object_unparent(OBJECT(cpu->apic_state)); 4295 cpu->apic_state = NULL; 4296 } 4297 4298 xcc->parent_unrealize(dev, &local_err); 4299 if (local_err != NULL) { 4300 error_propagate(errp, local_err); 4301 return; 4302 } 4303 } 4304 4305 typedef struct BitProperty { 4306 FeatureWord w; 4307 uint32_t mask; 4308 } BitProperty; 4309 4310 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 4311 void *opaque, Error **errp) 4312 { 4313 X86CPU *cpu = X86_CPU(obj); 4314 BitProperty *fp = opaque; 4315 uint32_t f = cpu->env.features[fp->w]; 4316 bool value = (f & fp->mask) == fp->mask; 4317 visit_type_bool(v, name, &value, errp); 4318 } 4319 4320 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 4321 void *opaque, Error **errp) 4322 { 4323 DeviceState *dev = DEVICE(obj); 4324 X86CPU *cpu = X86_CPU(obj); 4325 BitProperty *fp = opaque; 4326 Error *local_err = NULL; 4327 bool value; 4328 4329 if (dev->realized) { 4330 qdev_prop_set_after_realize(dev, name, errp); 4331 return; 4332 } 4333 4334 visit_type_bool(v, name, &value, &local_err); 4335 if (local_err) { 4336 error_propagate(errp, local_err); 4337 return; 4338 } 4339 4340 if (value) { 4341 cpu->env.features[fp->w] |= fp->mask; 4342 } else { 4343 cpu->env.features[fp->w] &= ~fp->mask; 4344 } 4345 cpu->env.user_features[fp->w] |= fp->mask; 4346 } 4347 4348 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 4349 void *opaque) 4350 { 4351 BitProperty *prop = opaque; 4352 g_free(prop); 4353 } 4354 4355 /* Register a boolean property to get/set a single bit in a uint32_t field. 4356 * 4357 * The same property name can be registered multiple times to make it affect 4358 * multiple bits in the same FeatureWord. In that case, the getter will return 4359 * true only if all bits are set. 4360 */ 4361 static void x86_cpu_register_bit_prop(X86CPU *cpu, 4362 const char *prop_name, 4363 FeatureWord w, 4364 int bitnr) 4365 { 4366 BitProperty *fp; 4367 ObjectProperty *op; 4368 uint32_t mask = (1UL << bitnr); 4369 4370 op = object_property_find(OBJECT(cpu), prop_name, NULL); 4371 if (op) { 4372 fp = op->opaque; 4373 assert(fp->w == w); 4374 fp->mask |= mask; 4375 } else { 4376 fp = g_new0(BitProperty, 1); 4377 fp->w = w; 4378 fp->mask = mask; 4379 object_property_add(OBJECT(cpu), prop_name, "bool", 4380 x86_cpu_get_bit_prop, 4381 x86_cpu_set_bit_prop, 4382 x86_cpu_release_bit_prop, fp, &error_abort); 4383 } 4384 } 4385 4386 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 4387 FeatureWord w, 4388 int bitnr) 4389 { 4390 FeatureWordInfo *fi = &feature_word_info[w]; 4391 const char *name = fi->feat_names[bitnr]; 4392 4393 if (!name) { 4394 return; 4395 } 4396 4397 /* Property names should use "-" instead of "_". 
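     * (For example, "fxsr-opt" is the canonical property name; "fxsr_opt"
     * only exists as an alias registered in x86_cpu_initfn() below.)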
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore; they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}

static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    object_property_add_alias(obj, "sse3", obj, "pni",
&error_abort); 4501 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort); 4502 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort); 4503 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort); 4504 object_property_add_alias(obj, "xd", obj, "nx", &error_abort); 4505 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort); 4506 object_property_add_alias(obj, "i64", obj, "lm", &error_abort); 4507 4508 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort); 4509 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort); 4510 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort); 4511 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort); 4512 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort); 4513 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort); 4514 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort); 4515 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort); 4516 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort); 4517 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort); 4518 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort); 4519 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort); 4520 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); 4521 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); 4522 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); 4523 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); 4524 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); 4525 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort); 4526 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort); 4527 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort); 4528 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort); 4529 4530 if (xcc->cpu_def) { 4531 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); 4532 } 4533 } 4534 4535 static int64_t x86_cpu_get_arch_id(CPUState *cs) 4536 { 4537 X86CPU *cpu = X86_CPU(cs); 4538 4539 return cpu->apic_id; 4540 } 4541 4542 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 4543 { 4544 X86CPU *cpu = X86_CPU(cs); 4545 4546 return cpu->env.cr[0] & CR0_PG_MASK; 4547 } 4548 4549 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 4550 { 4551 X86CPU *cpu = X86_CPU(cs); 4552 4553 cpu->env.eip = value; 4554 } 4555 4556 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 4557 { 4558 X86CPU *cpu = X86_CPU(cs); 4559 4560 cpu->env.eip = tb->pc - tb->cs_base; 4561 } 4562 4563 static bool x86_cpu_has_work(CPUState *cs) 4564 { 4565 X86CPU *cpu = X86_CPU(cs); 4566 CPUX86State *env = &cpu->env; 4567 4568 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD | 4569 CPU_INTERRUPT_POLL)) && 4570 (env->eflags & IF_MASK)) || 4571 (cs->interrupt_request & (CPU_INTERRUPT_NMI | 4572 CPU_INTERRUPT_INIT | 4573 CPU_INTERRUPT_SIPI | 4574 CPU_INTERRUPT_MCE)) || 4575 ((cs->interrupt_request & CPU_INTERRUPT_SMI) && 4576 !(env->hflags & HF_SMM_MASK)); 4577 } 4578 4579 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 4580 { 4581 X86CPU *cpu = X86_CPU(cs); 4582 CPUX86State *env 
= &cpu->env; 4583 4584 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 4585 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 4586 : bfd_mach_i386_i8086); 4587 info->print_insn = print_insn_i386; 4588 4589 info->cap_arch = CS_ARCH_X86; 4590 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 4591 : env->hflags & HF_CS32_MASK ? CS_MODE_32 4592 : CS_MODE_16); 4593 info->cap_insn_unit = 1; 4594 info->cap_insn_split = 8; 4595 } 4596 4597 void x86_update_hflags(CPUX86State *env) 4598 { 4599 uint32_t hflags; 4600 #define HFLAG_COPY_MASK \ 4601 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 4602 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 4603 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 4604 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 4605 4606 hflags = env->hflags & HFLAG_COPY_MASK; 4607 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 4608 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 4609 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 4610 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 4611 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 4612 4613 if (env->cr[4] & CR4_OSFXSR_MASK) { 4614 hflags |= HF_OSFXSR_MASK; 4615 } 4616 4617 if (env->efer & MSR_EFER_LMA) { 4618 hflags |= HF_LMA_MASK; 4619 } 4620 4621 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 4622 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 4623 } else { 4624 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 4625 (DESC_B_SHIFT - HF_CS32_SHIFT); 4626 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 4627 (DESC_B_SHIFT - HF_SS32_SHIFT); 4628 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 4629 !(hflags & HF_CS32_MASK)) { 4630 hflags |= HF_ADDSEG_MASK; 4631 } else { 4632 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 4633 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 4634 } 4635 } 4636 env->hflags = hflags; 4637 } 4638 4639 static Property x86_cpu_properties[] = { 4640 #ifdef CONFIG_USER_ONLY 4641 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 4642 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 4643 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 4644 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 4645 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 4646 #else 4647 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 4648 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 4649 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 4650 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 4651 #endif 4652 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 4653 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 4654 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks }, 4655 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false), 4656 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false), 4657 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false), 4658 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false), 4659 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false), 4660 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false), 4661 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false), 4662 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false), 4663 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false), 4664 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 4665 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 4666 
DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 4667 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 4668 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 4669 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 4670 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 4671 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 4672 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 4673 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 4674 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 4675 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 4676 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 4677 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 4678 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 4679 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 4680 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 4681 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 4682 false), 4683 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 4684 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 4685 4686 /* 4687 * From "Requirements for Implementing the Microsoft 4688 * Hypervisor Interface": 4689 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 4690 * 4691 * "Starting with Windows Server 2012 and Windows 8, if 4692 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 4693 * the hypervisor imposes no specific limit to the number of VPs. 4694 * In this case, Windows Server 2012 guest VMs may use more than 4695 * 64 VPs, up to the maximum supported number of processors applicable 4696 * to the specific Windows version being used." 
4697 */ 4698 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 4699 DEFINE_PROP_END_OF_LIST() 4700 }; 4701 4702 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 4703 { 4704 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4705 CPUClass *cc = CPU_CLASS(oc); 4706 DeviceClass *dc = DEVICE_CLASS(oc); 4707 4708 xcc->parent_realize = dc->realize; 4709 xcc->parent_unrealize = dc->unrealize; 4710 dc->realize = x86_cpu_realizefn; 4711 dc->unrealize = x86_cpu_unrealizefn; 4712 dc->props = x86_cpu_properties; 4713 4714 xcc->parent_reset = cc->reset; 4715 cc->reset = x86_cpu_reset; 4716 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 4717 4718 cc->class_by_name = x86_cpu_class_by_name; 4719 cc->parse_features = x86_cpu_parse_featurestr; 4720 cc->has_work = x86_cpu_has_work; 4721 #ifdef CONFIG_TCG 4722 cc->do_interrupt = x86_cpu_do_interrupt; 4723 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 4724 #endif 4725 cc->dump_state = x86_cpu_dump_state; 4726 cc->get_crash_info = x86_cpu_get_crash_info; 4727 cc->set_pc = x86_cpu_set_pc; 4728 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 4729 cc->gdb_read_register = x86_cpu_gdb_read_register; 4730 cc->gdb_write_register = x86_cpu_gdb_write_register; 4731 cc->get_arch_id = x86_cpu_get_arch_id; 4732 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 4733 #ifdef CONFIG_USER_ONLY 4734 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault; 4735 #else 4736 cc->asidx_from_attrs = x86_asidx_from_attrs; 4737 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 4738 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; 4739 cc->write_elf64_note = x86_cpu_write_elf64_note; 4740 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 4741 cc->write_elf32_note = x86_cpu_write_elf32_note; 4742 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 4743 cc->vmsd = &vmstate_x86_cpu; 4744 #endif 4745 cc->gdb_arch_name = x86_gdb_arch_name; 4746 #ifdef TARGET_X86_64 4747 cc->gdb_core_xml_file = "i386-64bit.xml"; 4748 cc->gdb_num_core_regs = 57; 4749 #else 4750 cc->gdb_core_xml_file = "i386-32bit.xml"; 4751 cc->gdb_num_core_regs = 41; 4752 #endif 4753 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 4754 cc->debug_excp_handler = breakpoint_handler; 4755 #endif 4756 cc->cpu_exec_enter = x86_cpu_exec_enter; 4757 cc->cpu_exec_exit = x86_cpu_exec_exit; 4758 #ifdef CONFIG_TCG 4759 cc->tcg_initialize = tcg_x86_init; 4760 #endif 4761 cc->disas_set_info = x86_disas_set_info; 4762 4763 dc->user_creatable = true; 4764 } 4765 4766 static const TypeInfo x86_cpu_type_info = { 4767 .name = TYPE_X86_CPU, 4768 .parent = TYPE_CPU, 4769 .instance_size = sizeof(X86CPU), 4770 .instance_init = x86_cpu_initfn, 4771 .abstract = true, 4772 .class_size = sizeof(X86CPUClass), 4773 .class_init = x86_cpu_common_class_init, 4774 }; 4775 4776 4777 /* "base" CPU model, used by query-cpu-model-expansion */ 4778 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 4779 { 4780 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4781 4782 xcc->static_model = true; 4783 xcc->migration_safe = true; 4784 xcc->model_description = "base CPU model type with no features enabled"; 4785 xcc->ordering = 8; 4786 } 4787 4788 static const TypeInfo x86_base_cpu_type_info = { 4789 .name = X86_CPU_TYPE_NAME("base"), 4790 .parent = TYPE_X86_CPU, 4791 .class_init = x86_cpu_base_class_init, 4792 }; 4793 4794 static void x86_cpu_register_types(void) 4795 { 4796 int i; 4797 4798 type_register_static(&x86_cpu_type_info); 4799 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 4800 
x86_register_cpudef_type(&builtin_x86_defs[i]); 4801 } 4802 type_register_static(&max_x86_cpu_type_info); 4803 type_register_static(&x86_base_cpu_type_info); 4804 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4805 type_register_static(&host_x86_cpu_type_info); 4806 #endif 4807 } 4808 4809 type_init(x86_cpu_register_types) 4810
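/*
 * Illustrative sketch, not part of the original code: once the types above
 * are registered, a caller can instantiate and realize a CPU model through
 * QOM, roughly like this (the calling context is an assumption):
 *
 *     Object *obj = object_new(X86_CPU_TYPE_NAME("base"));
 *     object_property_set_bool(obj, true, "realized", &error_fatal);
 *
 * object_new() runs x86_cpu_initfn() (model loading), and setting the
 * "realized" property runs x86_cpu_realizefn() (feature expansion,
 * filtering, APIC creation and vCPU init).
 */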