/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/types.h"

#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif


/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is encoded in bits 7:5 of CPUID.4 EAX */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a way count to the 4-bit field value; unsupported counts
 * encode as 0 ("invalid").  The argument is fully parenthesized
 * so expressions such as AMD_ENC_ASSOC(x + y) expand safely.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2  : \
                          (a) ==   4 ? 0x4  : \
                          (a) ==   8 ? 0x6  : \
                          (a) ==  16 ? 0x8  : \
                          (a) ==  32 ? 0xA  : \
                          (a) ==  48 ? 0xB  : \
                          (a) ==  64 ? 0xC  : \
                          (a) ==  96 ? 0xD  : \
                          (a) == 128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512



/* Assemble the 12-character CPUID vendor string from the three CPUID
 * output words, in the EBX/EDX/ECX order the hardware reports them.
 * dst must have room for CPUID_VENDOR_SZ + 1 bytes (NUL-terminated).
 */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}

/* Cumulative CPUID[1].EDX feature sets for the classic CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits that the TCG emulator can actually implement, used to
 * mask guest-visible features when running without KVM: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-FeatureWord description: names of each bit and the CPUID leaf/register
 * the word is read from.
 */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenment feature words (CPUID 0x40000003); bit names are
     * intentionally NULL so they cannot be set from the command line.
     */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE state-component bitmap (CPUID.0xD:0 EAX/EDX); no bit names, so
     * individual bits cannot be set from the command line.
     */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};

/* Maps an R_* register index to a printable name and its QAPI enum value */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
/* Description of one XSAVE state component: the CPUID feature bit that
 * enables it, and its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
/* Return the size in bytes of the XSAVE area needed to hold all state
 * components selected in @mask (the highest offset+size among them).
 * NOTE(review): return type is uint32_t while the accumulator is uint64_t;
 * presumably all real offsets/sizes fit in 32 bits — confirm.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

/* Combine FEAT_XSAVE_COMP_HI:FEAT_XSAVE_COMP_LO into the 64-bit
 * XSAVE state-component bitmap for @cpu.
 */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the printable name of 32-bit register @reg (R_* index),
 * or NULL if @reg is out of range.
 */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

/* Execute the CPUID instruction on the host with inputs @function (EAX)
 * and @count (ECX); store the outputs through any non-NULL pointers.
 * Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* Save/restore all GPRs around CPUID and store results through vec;
     * avoids naming EBX as an asm output (it may be reserved by the
     * compiler for PIC — NOTE(review): assumption, confirm).
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
/* Query the host CPU's vendor string and family/model/stepping via CPUID
 * leaves 0 and 1.  @vendor must hold CPUID_VENDOR_SZ + 1 bytes; the other
 * output pointers may be NULL.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family + extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model | (extended model << 4) */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM ObjectClass for CPU model @cpu_model, or NULL. */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/* Strip the "-x86_64-cpu"-style suffix from the class name to recover the
 * model name.  Caller frees the returned string.
 */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* Static definition of one built-in CPU model. */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
};

static X86CPUDefinition builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 766 CPUID_PSE36, 767 .features[FEAT_1_ECX] = 768 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 769 .features[FEAT_8000_0001_EDX] = 770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 771 .features[FEAT_8000_0001_ECX] = 772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 773 .xlevel = 0x8000000A, 774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 775 }, 776 { 777 .name = "phenom", 778 .level = 5, 779 .vendor = CPUID_VENDOR_AMD, 780 .family = 16, 781 .model = 2, 782 .stepping = 3, 783 /* Missing: CPUID_HT */ 784 .features[FEAT_1_EDX] = 785 PPRO_FEATURES | 786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 787 CPUID_PSE36 | CPUID_VME, 788 .features[FEAT_1_ECX] = 789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 790 CPUID_EXT_POPCNT, 791 .features[FEAT_8000_0001_EDX] = 792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 796 CPUID_EXT3_CR8LEG, 797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 799 .features[FEAT_8000_0001_ECX] = 800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 802 /* Missing: CPUID_SVM_LBRV */ 803 .features[FEAT_SVM] = 804 CPUID_SVM_NPT, 805 .xlevel = 0x8000001A, 806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 807 }, 808 { 809 .name = "core2duo", 810 .level = 10, 811 .vendor = CPUID_VENDOR_INTEL, 812 .family = 6, 813 .model = 15, 814 .stepping = 11, 815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 816 .features[FEAT_1_EDX] = 817 PPRO_FEATURES | 818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 822 .features[FEAT_1_ECX] = 823 CPUID_EXT_SSE3 | 
CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 824 CPUID_EXT_CX16, 825 .features[FEAT_8000_0001_EDX] = 826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 827 .features[FEAT_8000_0001_ECX] = 828 CPUID_EXT3_LAHF_LM, 829 .xlevel = 0x80000008, 830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 831 }, 832 { 833 .name = "kvm64", 834 .level = 0xd, 835 .vendor = CPUID_VENDOR_INTEL, 836 .family = 15, 837 .model = 6, 838 .stepping = 1, 839 /* Missing: CPUID_HT */ 840 .features[FEAT_1_EDX] = 841 PPRO_FEATURES | CPUID_VME | 842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 843 CPUID_PSE36, 844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 845 .features[FEAT_1_ECX] = 846 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 848 .features[FEAT_8000_0001_EDX] = 849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 854 .features[FEAT_8000_0001_ECX] = 855 0, 856 .xlevel = 0x80000008, 857 .model_id = "Common KVM processor" 858 }, 859 { 860 .name = "qemu32", 861 .level = 4, 862 .vendor = CPUID_VENDOR_INTEL, 863 .family = 6, 864 .model = 6, 865 .stepping = 3, 866 .features[FEAT_1_EDX] = 867 PPRO_FEATURES, 868 .features[FEAT_1_ECX] = 869 CPUID_EXT_SSE3, 870 .xlevel = 0x80000004, 871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 872 }, 873 { 874 .name = "kvm32", 875 .level = 5, 876 .vendor = CPUID_VENDOR_INTEL, 877 .family = 15, 878 .model = 6, 879 .stepping = 1, 880 .features[FEAT_1_EDX] = 881 PPRO_FEATURES | CPUID_VME | 882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 883 .features[FEAT_1_ECX] = 884 CPUID_EXT_SSE3, 885 .features[FEAT_8000_0001_ECX] = 886 0, 887 .xlevel = 0x80000008, 888 .model_id = "Common 32-bit KVM processor" 889 }, 890 { 891 .name = "coreduo", 892 .level = 10, 893 
.vendor = CPUID_VENDOR_INTEL, 894 .family = 6, 895 .model = 14, 896 .stepping = 8, 897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 898 .features[FEAT_1_EDX] = 899 PPRO_FEATURES | CPUID_VME | 900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 901 CPUID_SS, 902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 904 .features[FEAT_1_ECX] = 905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 906 .features[FEAT_8000_0001_EDX] = 907 CPUID_EXT2_NX, 908 .xlevel = 0x80000008, 909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 910 }, 911 { 912 .name = "486", 913 .level = 1, 914 .vendor = CPUID_VENDOR_INTEL, 915 .family = 4, 916 .model = 8, 917 .stepping = 0, 918 .features[FEAT_1_EDX] = 919 I486_FEATURES, 920 .xlevel = 0, 921 }, 922 { 923 .name = "pentium", 924 .level = 1, 925 .vendor = CPUID_VENDOR_INTEL, 926 .family = 5, 927 .model = 4, 928 .stepping = 3, 929 .features[FEAT_1_EDX] = 930 PENTIUM_FEATURES, 931 .xlevel = 0, 932 }, 933 { 934 .name = "pentium2", 935 .level = 2, 936 .vendor = CPUID_VENDOR_INTEL, 937 .family = 6, 938 .model = 5, 939 .stepping = 2, 940 .features[FEAT_1_EDX] = 941 PENTIUM2_FEATURES, 942 .xlevel = 0, 943 }, 944 { 945 .name = "pentium3", 946 .level = 3, 947 .vendor = CPUID_VENDOR_INTEL, 948 .family = 6, 949 .model = 7, 950 .stepping = 3, 951 .features[FEAT_1_EDX] = 952 PENTIUM3_FEATURES, 953 .xlevel = 0, 954 }, 955 { 956 .name = "athlon", 957 .level = 2, 958 .vendor = CPUID_VENDOR_AMD, 959 .family = 6, 960 .model = 2, 961 .stepping = 3, 962 .features[FEAT_1_EDX] = 963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 964 CPUID_MCA, 965 .features[FEAT_8000_0001_EDX] = 966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 967 .xlevel = 0x80000008, 968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 969 }, 970 { 971 .name = "n270", 972 .level = 10, 973 .vendor = CPUID_VENDOR_INTEL, 974 .family = 6, 975 .model = 28, 976 .stepping = 2, 977 /* Missing: CPUID_DTS, CPUID_HT, 
CPUID_TM, CPUID_PBE */ 978 .features[FEAT_1_EDX] = 979 PPRO_FEATURES | 980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 981 CPUID_ACPI | CPUID_SS, 982 /* Some CPUs got no CPUID_SEP */ 983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 984 * CPUID_EXT_XTPR */ 985 .features[FEAT_1_ECX] = 986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 987 CPUID_EXT_MOVBE, 988 .features[FEAT_8000_0001_EDX] = 989 CPUID_EXT2_NX, 990 .features[FEAT_8000_0001_ECX] = 991 CPUID_EXT3_LAHF_LM, 992 .xlevel = 0x80000008, 993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 994 }, 995 { 996 .name = "Conroe", 997 .level = 10, 998 .vendor = CPUID_VENDOR_INTEL, 999 .family = 6, 1000 .model = 15, 1001 .stepping = 3, 1002 .features[FEAT_1_EDX] = 1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1007 CPUID_DE | CPUID_FP87, 1008 .features[FEAT_1_ECX] = 1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1010 .features[FEAT_8000_0001_EDX] = 1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1012 .features[FEAT_8000_0001_ECX] = 1013 CPUID_EXT3_LAHF_LM, 1014 .xlevel = 0x80000008, 1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1016 }, 1017 { 1018 .name = "Penryn", 1019 .level = 10, 1020 .vendor = CPUID_VENDOR_INTEL, 1021 .family = 6, 1022 .model = 23, 1023 .stepping = 3, 1024 .features[FEAT_1_EDX] = 1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1029 CPUID_DE | CPUID_FP87, 1030 .features[FEAT_1_ECX] = 1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1032 CPUID_EXT_SSE3, 1033 .features[FEAT_8000_0001_EDX] = 1034 
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1035 .features[FEAT_8000_0001_ECX] = 1036 CPUID_EXT3_LAHF_LM, 1037 .xlevel = 0x80000008, 1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1039 }, 1040 { 1041 .name = "Nehalem", 1042 .level = 11, 1043 .vendor = CPUID_VENDOR_INTEL, 1044 .family = 6, 1045 .model = 26, 1046 .stepping = 3, 1047 .features[FEAT_1_EDX] = 1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1052 CPUID_DE | CPUID_FP87, 1053 .features[FEAT_1_ECX] = 1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1056 .features[FEAT_8000_0001_EDX] = 1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1058 .features[FEAT_8000_0001_ECX] = 1059 CPUID_EXT3_LAHF_LM, 1060 .xlevel = 0x80000008, 1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1062 }, 1063 { 1064 .name = "Westmere", 1065 .level = 11, 1066 .vendor = CPUID_VENDOR_INTEL, 1067 .family = 6, 1068 .model = 44, 1069 .stepping = 1, 1070 .features[FEAT_1_EDX] = 1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1075 CPUID_DE | CPUID_FP87, 1076 .features[FEAT_1_ECX] = 1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1080 .features[FEAT_8000_0001_EDX] = 1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1082 .features[FEAT_8000_0001_ECX] = 1083 CPUID_EXT3_LAHF_LM, 1084 .features[FEAT_6_EAX] = 1085 CPUID_6_EAX_ARAT, 1086 .xlevel = 0x80000008, 1087 .model_id = "Westmere 
E56xx/L56xx/X56xx (Nehalem-C)", 1088 }, 1089 { 1090 .name = "SandyBridge", 1091 .level = 0xd, 1092 .vendor = CPUID_VENDOR_INTEL, 1093 .family = 6, 1094 .model = 42, 1095 .stepping = 1, 1096 .features[FEAT_1_EDX] = 1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1101 CPUID_DE | CPUID_FP87, 1102 .features[FEAT_1_ECX] = 1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1107 CPUID_EXT_SSE3, 1108 .features[FEAT_8000_0001_EDX] = 1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1110 CPUID_EXT2_SYSCALL, 1111 .features[FEAT_8000_0001_ECX] = 1112 CPUID_EXT3_LAHF_LM, 1113 .features[FEAT_XSAVE] = 1114 CPUID_XSAVE_XSAVEOPT, 1115 .features[FEAT_6_EAX] = 1116 CPUID_6_EAX_ARAT, 1117 .xlevel = 0x80000008, 1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1119 }, 1120 { 1121 .name = "IvyBridge", 1122 .level = 0xd, 1123 .vendor = CPUID_VENDOR_INTEL, 1124 .family = 6, 1125 .model = 58, 1126 .stepping = 9, 1127 .features[FEAT_1_EDX] = 1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1132 CPUID_DE | CPUID_FP87, 1133 .features[FEAT_1_ECX] = 1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1139 .features[FEAT_7_0_EBX] = 1140 
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1141 CPUID_7_0_EBX_ERMS, 1142 .features[FEAT_8000_0001_EDX] = 1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1144 CPUID_EXT2_SYSCALL, 1145 .features[FEAT_8000_0001_ECX] = 1146 CPUID_EXT3_LAHF_LM, 1147 .features[FEAT_XSAVE] = 1148 CPUID_XSAVE_XSAVEOPT, 1149 .features[FEAT_6_EAX] = 1150 CPUID_6_EAX_ARAT, 1151 .xlevel = 0x80000008, 1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1153 }, 1154 { 1155 .name = "Haswell-noTSX", 1156 .level = 0xd, 1157 .vendor = CPUID_VENDOR_INTEL, 1158 .family = 6, 1159 .model = 60, 1160 .stepping = 1, 1161 .features[FEAT_1_EDX] = 1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1166 CPUID_DE | CPUID_FP87, 1167 .features[FEAT_1_ECX] = 1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1174 .features[FEAT_8000_0001_EDX] = 1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1176 CPUID_EXT2_SYSCALL, 1177 .features[FEAT_8000_0001_ECX] = 1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1179 .features[FEAT_7_0_EBX] = 1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 1183 .features[FEAT_XSAVE] = 1184 CPUID_XSAVE_XSAVEOPT, 1185 .features[FEAT_6_EAX] = 1186 CPUID_6_EAX_ARAT, 1187 .xlevel = 0x80000008, 1188 .model_id = "Intel Core Processor (Haswell, no TSX)", 1189 }, { 1190 .name = "Haswell", 1191 .level = 0xd, 1192 .vendor = CPUID_VENDOR_INTEL, 1193 .family = 6, 
1194 .model = 60, 1195 .stepping = 4, 1196 .features[FEAT_1_EDX] = 1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1201 CPUID_DE | CPUID_FP87, 1202 .features[FEAT_1_ECX] = 1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1209 .features[FEAT_8000_0001_EDX] = 1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1211 CPUID_EXT2_SYSCALL, 1212 .features[FEAT_8000_0001_ECX] = 1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1214 .features[FEAT_7_0_EBX] = 1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1218 CPUID_7_0_EBX_RTM, 1219 .features[FEAT_XSAVE] = 1220 CPUID_XSAVE_XSAVEOPT, 1221 .features[FEAT_6_EAX] = 1222 CPUID_6_EAX_ARAT, 1223 .xlevel = 0x80000008, 1224 .model_id = "Intel Core Processor (Haswell)", 1225 }, 1226 { 1227 .name = "Broadwell-noTSX", 1228 .level = 0xd, 1229 .vendor = CPUID_VENDOR_INTEL, 1230 .family = 6, 1231 .model = 61, 1232 .stepping = 2, 1233 .features[FEAT_1_EDX] = 1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1238 CPUID_DE | CPUID_FP87, 1239 .features[FEAT_1_ECX] = 1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1246 .features[FEAT_8000_0001_EDX] = 1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1248 CPUID_EXT2_SYSCALL, 1249 .features[FEAT_8000_0001_ECX] = 1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1251 .features[FEAT_7_0_EBX] = 1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1256 CPUID_7_0_EBX_SMAP, 1257 .features[FEAT_XSAVE] = 1258 CPUID_XSAVE_XSAVEOPT, 1259 .features[FEAT_6_EAX] = 1260 CPUID_6_EAX_ARAT, 1261 .xlevel = 0x80000008, 1262 .model_id = "Intel Core Processor (Broadwell, no TSX)", 1263 }, 1264 { 1265 .name = "Broadwell", 1266 .level = 0xd, 1267 .vendor = CPUID_VENDOR_INTEL, 1268 .family = 6, 1269 .model = 61, 1270 .stepping = 2, 1271 .features[FEAT_1_EDX] = 1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1276 CPUID_DE | CPUID_FP87, 1277 .features[FEAT_1_ECX] = 1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1284 .features[FEAT_8000_0001_EDX] = 1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1286 CPUID_EXT2_SYSCALL, 1287 .features[FEAT_8000_0001_ECX] = 1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | 
CPUID_EXT3_3DNOWPREFETCH, 1289 .features[FEAT_7_0_EBX] = 1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1294 CPUID_7_0_EBX_SMAP, 1295 .features[FEAT_XSAVE] = 1296 CPUID_XSAVE_XSAVEOPT, 1297 .features[FEAT_6_EAX] = 1298 CPUID_6_EAX_ARAT, 1299 .xlevel = 0x80000008, 1300 .model_id = "Intel Core Processor (Broadwell)", 1301 }, 1302 { 1303 .name = "Skylake-Client", 1304 .level = 0xd, 1305 .vendor = CPUID_VENDOR_INTEL, 1306 .family = 6, 1307 .model = 94, 1308 .stepping = 3, 1309 .features[FEAT_1_EDX] = 1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1314 CPUID_DE | CPUID_FP87, 1315 .features[FEAT_1_ECX] = 1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1322 .features[FEAT_8000_0001_EDX] = 1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1324 CPUID_EXT2_SYSCALL, 1325 .features[FEAT_8000_0001_ECX] = 1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1327 .features[FEAT_7_0_EBX] = 1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, 1333 /* Missing: XSAVES (not supported by some Linux 
versions, 1334 * including v4.1 to v4.12). 1335 * KVM doesn't yet expose any XSAVES state save component, 1336 * and the only one defined in Skylake (processor tracing) 1337 * probably will block migration anyway. 1338 */ 1339 .features[FEAT_XSAVE] = 1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1341 CPUID_XSAVE_XGETBV1, 1342 .features[FEAT_6_EAX] = 1343 CPUID_6_EAX_ARAT, 1344 .xlevel = 0x80000008, 1345 .model_id = "Intel Core Processor (Skylake)", 1346 }, 1347 { 1348 .name = "Skylake-Server", 1349 .level = 0xd, 1350 .vendor = CPUID_VENDOR_INTEL, 1351 .family = 6, 1352 .model = 85, 1353 .stepping = 4, 1354 .features[FEAT_1_EDX] = 1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1359 CPUID_DE | CPUID_FP87, 1360 .features[FEAT_1_ECX] = 1361 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1362 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1363 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1364 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1365 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1366 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1367 .features[FEAT_8000_0001_EDX] = 1368 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 1369 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1370 .features[FEAT_8000_0001_ECX] = 1371 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1372 .features[FEAT_7_0_EBX] = 1373 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1374 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1375 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1376 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1377 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 1378 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 1379 CPUID_7_0_EBX_AVX512BW | 
CPUID_7_0_EBX_AVX512CD | 1380 CPUID_7_0_EBX_AVX512VL, 1381 /* Missing: XSAVES (not supported by some Linux versions, 1382 * including v4.1 to v4.12). 1383 * KVM doesn't yet expose any XSAVES state save component, 1384 * and the only one defined in Skylake (processor tracing) 1385 * probably will block migration anyway. 1386 */ 1387 .features[FEAT_XSAVE] = 1388 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1389 CPUID_XSAVE_XGETBV1, 1390 .features[FEAT_6_EAX] = 1391 CPUID_6_EAX_ARAT, 1392 .xlevel = 0x80000008, 1393 .model_id = "Intel Xeon Processor (Skylake)", 1394 }, 1395 { 1396 .name = "Opteron_G1", 1397 .level = 5, 1398 .vendor = CPUID_VENDOR_AMD, 1399 .family = 15, 1400 .model = 6, 1401 .stepping = 1, 1402 .features[FEAT_1_EDX] = 1403 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1404 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1405 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1406 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1407 CPUID_DE | CPUID_FP87, 1408 .features[FEAT_1_ECX] = 1409 CPUID_EXT_SSE3, 1410 .features[FEAT_8000_0001_EDX] = 1411 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1412 .xlevel = 0x80000008, 1413 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 1414 }, 1415 { 1416 .name = "Opteron_G2", 1417 .level = 5, 1418 .vendor = CPUID_VENDOR_AMD, 1419 .family = 15, 1420 .model = 6, 1421 .stepping = 1, 1422 .features[FEAT_1_EDX] = 1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1427 CPUID_DE | CPUID_FP87, 1428 .features[FEAT_1_ECX] = 1429 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 1430 /* Missing: CPUID_EXT2_RDTSCP */ 1431 .features[FEAT_8000_0001_EDX] = 1432 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1433 .features[FEAT_8000_0001_ECX] = 1434 CPUID_EXT3_SVM 
| CPUID_EXT3_LAHF_LM, 1435 .xlevel = 0x80000008, 1436 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 1437 }, 1438 { 1439 .name = "Opteron_G3", 1440 .level = 5, 1441 .vendor = CPUID_VENDOR_AMD, 1442 .family = 16, 1443 .model = 2, 1444 .stepping = 3, 1445 .features[FEAT_1_EDX] = 1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1450 CPUID_DE | CPUID_FP87, 1451 .features[FEAT_1_ECX] = 1452 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 1453 CPUID_EXT_SSE3, 1454 /* Missing: CPUID_EXT2_RDTSCP */ 1455 .features[FEAT_8000_0001_EDX] = 1456 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1457 .features[FEAT_8000_0001_ECX] = 1458 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 1459 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1460 .xlevel = 0x80000008, 1461 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 1462 }, 1463 { 1464 .name = "Opteron_G4", 1465 .level = 0xd, 1466 .vendor = CPUID_VENDOR_AMD, 1467 .family = 21, 1468 .model = 1, 1469 .stepping = 2, 1470 .features[FEAT_1_EDX] = 1471 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1472 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1473 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1474 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1475 CPUID_DE | CPUID_FP87, 1476 .features[FEAT_1_ECX] = 1477 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1478 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1479 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1480 CPUID_EXT_SSE3, 1481 /* Missing: CPUID_EXT2_RDTSCP */ 1482 .features[FEAT_8000_0001_EDX] = 1483 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 1484 CPUID_EXT2_SYSCALL, 1485 .features[FEAT_8000_0001_ECX] = 1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 1489 CPUID_EXT3_LAHF_LM, 1490 /* no xsaveopt! */ 1491 .xlevel = 0x8000001A, 1492 .model_id = "AMD Opteron 62xx class CPU", 1493 }, 1494 { 1495 .name = "Opteron_G5", 1496 .level = 0xd, 1497 .vendor = CPUID_VENDOR_AMD, 1498 .family = 21, 1499 .model = 2, 1500 .stepping = 0, 1501 .features[FEAT_1_EDX] = 1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1506 CPUID_DE | CPUID_FP87, 1507 .features[FEAT_1_ECX] = 1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1512 /* Missing: CPUID_EXT2_RDTSCP */ 1513 .features[FEAT_8000_0001_EDX] = 1514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 1515 CPUID_EXT2_SYSCALL, 1516 .features[FEAT_8000_0001_ECX] = 1517 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 1518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 1519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 1520 CPUID_EXT3_LAHF_LM, 1521 /* no xsaveopt! */ 1522 .xlevel = 0x8000001A, 1523 .model_id = "AMD Opteron 63xx class CPU", 1524 }, 1525 }; 1526 1527 typedef struct PropValue { 1528 const char *prop, *value; 1529 } PropValue; 1530 1531 /* KVM-specific features that are automatically added/removed 1532 * from all CPU models when KVM is enabled. 
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};


/* Override the default value of a property in kvm_default_props.
 * Aborts (assert) if @prop is not already an entry of the table:
 * this function must only be used for properties listed there.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/* Return true if KVM reports LMCE support (MCG_LMCE_P bit of
 * KVM_X86_GET_MCE_CAP_SUPPORTED).  Always false in non-KVM builds,
 * since mce_cap stays 0.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

/* Copy the host's model-id string (CPUID leaves 0x80000002..0x80000004,
 * 3 leaves x 16 bytes) into @str.  @str must have room for 48 bytes;
 * no NUL terminator is appended here.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

/* qdev properties specific to the "max" CPU class */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init for the "max" CPU model: description, list ordering,
 * and the qdev properties declared above.
 */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}

static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);

/* Instance init for "-cpu max".  Under KVM, mirror the host CPU's
 * vendor/family/model/stepping and model-id and pick up the host's
 * maximum CPUID levels; under TCG, use a fixed AMD-flavored QEMU CPU.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        /* CPUID[1].EAX: family = base (bits 11-8) + extended (bits 27-20);
         * model = base (bits 7-4) | extended (bits 19-16) << 4.
         */
        host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
        host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        host_cpudef.stepping = eax & 0x0F;

        cpu_x86_fill_model_id(host_cpudef.model_id);

        x86_cpu_load_def(cpu, &host_cpudef, &error_abort);

        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#ifdef CONFIG_KVM

/* "host" is a thin subclass of "max" that additionally requires KVM */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->kvm_required = true;
    xcc->ordering = 8;

    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

/* Print a warning to stderr for every bit of @mask that the current
 * accelerator (KVM host or TCG) cannot provide for feature word @w.
 */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            assert(reg);
            fprintf(stderr, "warning: %s doesn't support requested feature: "
                    "CPUID.%02XH:%s%s%s [bit %d]\n",
                    kvm_enabled() ? "host" : "TCG",
                    f->cpuid_eax, reg,
                    f->feat_names[i] ? "." : "",
                    f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}

/* "family" property getter: base family (cpuid_version bits 11-8),
 * plus the extended family field (bits 27-20) when the base is 0xf.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

/* "family" property setter: values above 0x0f are encoded as base
 * family 0xf plus the remainder in the extended family field.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

/* "model" property getter: base model (bits 7-4) plus extended model
 * (bits 19-16) as the high nibble.
 */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* low nibble -> base model (bits 7-4), high nibble -> ext model */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

/* "stepping" property getter: cpuid_version bits 3-0 */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}

/* "vendor" property getter; caller frees the returned string */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

/* "vendor" property setter: the string must be exactly CPUID_VENDOR_SZ
 * characters; it is packed little-endian, 4 bytes per vendor register.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}

/* "model-id" property getter: unpack the 48-byte model-id string from
 * the cpuid_model word array; caller frees the returned string.
 */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

/* "model-id" property setter: pack up to 48 characters into the
 * cpuid_model word array, zero-padding the remainder.  NULL is
 * treated as the empty string.
 */
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

1916 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 1917 void *opaque, Error **errp) 1918 { 1919 X86CPU *cpu = X86_CPU(obj); 1920 int64_t value; 1921 1922 value = cpu->env.tsc_khz * 1000; 1923 visit_type_int(v, name, &value, errp); 1924 } 1925 1926 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 1927 void *opaque, Error **errp) 1928 { 1929 X86CPU *cpu = X86_CPU(obj); 1930 const int64_t min = 0; 1931 const int64_t max = INT64_MAX; 1932 Error *local_err = NULL; 1933 int64_t value; 1934 1935 visit_type_int(v, name, &value, &local_err); 1936 if (local_err) { 1937 error_propagate(errp, local_err); 1938 return; 1939 } 1940 if (value < min || value > max) { 1941 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 1942 name ? name : "null", value, min, max); 1943 return; 1944 } 1945 1946 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 1947 } 1948 1949 /* Generic getter for "feature-words" and "filtered-features" properties */ 1950 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 1951 const char *name, void *opaque, 1952 Error **errp) 1953 { 1954 uint32_t *array = (uint32_t *)opaque; 1955 FeatureWord w; 1956 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 1957 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 1958 X86CPUFeatureWordInfoList *list = NULL; 1959 1960 for (w = 0; w < FEATURE_WORDS; w++) { 1961 FeatureWordInfo *wi = &feature_word_info[w]; 1962 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 1963 qwi->cpuid_input_eax = wi->cpuid_eax; 1964 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx; 1965 qwi->cpuid_input_ecx = wi->cpuid_ecx; 1966 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum; 1967 qwi->features = array[w]; 1968 1969 /* List will be in reverse order, but order shouldn't matter */ 1970 list_entries[w].next = list; 1971 list_entries[w].value = &word_infos[w]; 1972 list = &list_entries[w]; 1973 } 1974 1975 visit_type_X86CPUFeatureWordInfoList(v, 
"feature-words", &list, errp); 1976 } 1977 1978 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name, 1979 void *opaque, Error **errp) 1980 { 1981 X86CPU *cpu = X86_CPU(obj); 1982 int64_t value = cpu->hyperv_spinlock_attempts; 1983 1984 visit_type_int(v, name, &value, errp); 1985 } 1986 1987 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name, 1988 void *opaque, Error **errp) 1989 { 1990 const int64_t min = 0xFFF; 1991 const int64_t max = UINT_MAX; 1992 X86CPU *cpu = X86_CPU(obj); 1993 Error *err = NULL; 1994 int64_t value; 1995 1996 visit_type_int(v, name, &value, &err); 1997 if (err) { 1998 error_propagate(errp, err); 1999 return; 2000 } 2001 2002 if (value < min || value > max) { 2003 error_setg(errp, "Property %s.%s doesn't take value %" PRId64 2004 " (minimum: %" PRId64 ", maximum: %" PRId64 ")", 2005 object_get_typename(obj), name ? name : "null", 2006 value, min, max); 2007 return; 2008 } 2009 cpu->hyperv_spinlock_attempts = value; 2010 } 2011 2012 static const PropertyInfo qdev_prop_spinlocks = { 2013 .name = "int", 2014 .get = x86_get_hv_spinlocks, 2015 .set = x86_set_hv_spinlocks, 2016 }; 2017 2018 /* Convert all '_' in a feature string option name to '-', to make feature 2019 * name conform to QOM property naming rule, which uses '-' instead of '_'. 2020 */ 2021 static inline void feat2prop(char *s) 2022 { 2023 while ((s = strchr(s, '_'))) { 2024 *s = '-'; 2025 } 2026 } 2027 2028 /* Return the feature property name for a feature flag bit */ 2029 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 2030 { 2031 /* XSAVE components are automatically enabled by other features, 2032 * so return the original feature name instead 2033 */ 2034 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 2035 int comp = (w == FEAT_XSAVE_COMP_HI) ? 
                                                       bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Map the XSAVE component back to the feature word/bit that
             * enables it, so the user-visible name is reported.
             */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper around g_strcmp0(), for g_list_find_custom() */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string.
 * Registers each option as a global property on @typename; only the
 * first call has any effect (later calls are no-ops).
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value' string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: "+feat"/"-feat" force the flag on/off */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* "feat=val" — split in place; bare "feat" means "feat=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same flag appears both as +/-feat and feat=val */
        if (g_list_find_custom(plus_features,
name, compare_string)) { 2111 warn_report("Ambiguous CPU model string. " 2112 "Don't mix both \"+%s\" and \"%s=%s\"", 2113 name, name, val); 2114 ambiguous = true; 2115 } 2116 if (g_list_find_custom(minus_features, name, compare_string)) { 2117 warn_report("Ambiguous CPU model string. " 2118 "Don't mix both \"-%s\" and \"%s=%s\"", 2119 name, name, val); 2120 ambiguous = true; 2121 } 2122 2123 /* Special case: */ 2124 if (!strcmp(name, "tsc-freq")) { 2125 int ret; 2126 uint64_t tsc_freq; 2127 2128 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 2129 if (ret < 0 || tsc_freq > INT64_MAX) { 2130 error_setg(errp, "bad numerical value %s", val); 2131 return; 2132 } 2133 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 2134 val = num; 2135 name = "tsc-frequency"; 2136 } 2137 2138 prop = g_new0(typeof(*prop), 1); 2139 prop->driver = typename; 2140 prop->property = g_strdup(name); 2141 prop->value = g_strdup(val); 2142 prop->errp = &error_fatal; 2143 qdev_prop_register_global(prop); 2144 } 2145 2146 if (ambiguous) { 2147 warn_report("Compatibility of ambiguous CPU model " 2148 "strings won't be kept on future QEMU versions"); 2149 } 2150 } 2151 2152 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 2153 static int x86_cpu_filter_features(X86CPU *cpu); 2154 2155 /* Check for missing features that may prevent the CPU class from 2156 * running using the current machine and accelerator. 
2157 */ 2158 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 2159 strList **missing_feats) 2160 { 2161 X86CPU *xc; 2162 FeatureWord w; 2163 Error *err = NULL; 2164 strList **next = missing_feats; 2165 2166 if (xcc->kvm_required && !kvm_enabled()) { 2167 strList *new = g_new0(strList, 1); 2168 new->value = g_strdup("kvm");; 2169 *missing_feats = new; 2170 return; 2171 } 2172 2173 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 2174 2175 x86_cpu_expand_features(xc, &err); 2176 if (err) { 2177 /* Errors at x86_cpu_expand_features should never happen, 2178 * but in case it does, just report the model as not 2179 * runnable at all using the "type" property. 2180 */ 2181 strList *new = g_new0(strList, 1); 2182 new->value = g_strdup("type"); 2183 *next = new; 2184 next = &new->next; 2185 } 2186 2187 x86_cpu_filter_features(xc); 2188 2189 for (w = 0; w < FEATURE_WORDS; w++) { 2190 uint32_t filtered = xc->filtered_features[w]; 2191 int i; 2192 for (i = 0; i < 32; i++) { 2193 if (filtered & (1UL << i)) { 2194 strList *new = g_new0(strList, 1); 2195 new->value = g_strdup(x86_cpu_feature_name(w, i)); 2196 *next = new; 2197 next = &new->next; 2198 } 2199 } 2200 } 2201 2202 object_unref(OBJECT(xc)); 2203 } 2204 2205 /* Print all cpuid feature names in featureset 2206 */ 2207 static void listflags(FILE *f, fprintf_function print, const char **featureset) 2208 { 2209 int bit; 2210 bool first = true; 2211 2212 for (bit = 0; bit < 32; bit++) { 2213 if (featureset[bit]) { 2214 print(f, "%s%s", first ? "" : " ", featureset[bit]); 2215 first = false; 2216 } 2217 } 2218 } 2219 2220 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ 2221 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 2222 { 2223 ObjectClass *class_a = (ObjectClass *)a; 2224 ObjectClass *class_b = (ObjectClass *)b; 2225 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 2226 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 2227 const char *name_a, *name_b; 2228 2229 if (cc_a->ordering != cc_b->ordering) { 2230 return cc_a->ordering - cc_b->ordering; 2231 } else { 2232 name_a = object_class_get_name(class_a); 2233 name_b = object_class_get_name(class_b); 2234 return strcmp(name_a, name_b); 2235 } 2236 } 2237 2238 static GSList *get_sorted_cpu_model_list(void) 2239 { 2240 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 2241 list = g_slist_sort(list, x86_cpu_list_compare); 2242 return list; 2243 } 2244 2245 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 2246 { 2247 ObjectClass *oc = data; 2248 X86CPUClass *cc = X86_CPU_CLASS(oc); 2249 CPUListState *s = user_data; 2250 char *name = x86_cpu_class_get_model_name(cc); 2251 const char *desc = cc->model_description; 2252 if (!desc && cc->cpu_def) { 2253 desc = cc->cpu_def->model_id; 2254 } 2255 2256 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n", 2257 name, desc); 2258 g_free(name); 2259 } 2260 2261 /* list available CPU models and flags */ 2262 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf) 2263 { 2264 int i; 2265 CPUListState s = { 2266 .file = f, 2267 .cpu_fprintf = cpu_fprintf, 2268 }; 2269 GSList *list; 2270 2271 (*cpu_fprintf)(f, "Available CPUs:\n"); 2272 list = get_sorted_cpu_model_list(); 2273 g_slist_foreach(list, x86_cpu_list_entry, &s); 2274 g_slist_free(list); 2275 2276 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n"); 2277 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 2278 FeatureWordInfo *fw = &feature_word_info[i]; 2279 2280 (*cpu_fprintf)(f, " "); 2281 listflags(f, cpu_fprintf, fw->feat_names); 2282 (*cpu_fprintf)(f, "\n"); 2283 } 2284 } 2285 2286 static void x86_cpu_definition_entry(gpointer data, gpointer 
user_data) 2287 { 2288 ObjectClass *oc = data; 2289 X86CPUClass *cc = X86_CPU_CLASS(oc); 2290 CpuDefinitionInfoList **cpu_list = user_data; 2291 CpuDefinitionInfoList *entry; 2292 CpuDefinitionInfo *info; 2293 2294 info = g_malloc0(sizeof(*info)); 2295 info->name = x86_cpu_class_get_model_name(cc); 2296 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 2297 info->has_unavailable_features = true; 2298 info->q_typename = g_strdup(object_class_get_name(oc)); 2299 info->migration_safe = cc->migration_safe; 2300 info->has_migration_safe = true; 2301 info->q_static = cc->static_model; 2302 2303 entry = g_malloc0(sizeof(*entry)); 2304 entry->value = info; 2305 entry->next = *cpu_list; 2306 *cpu_list = entry; 2307 } 2308 2309 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 2310 { 2311 CpuDefinitionInfoList *cpu_list = NULL; 2312 GSList *list = get_sorted_cpu_model_list(); 2313 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 2314 g_slist_free(list); 2315 return cpu_list; 2316 } 2317 2318 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 2319 bool migratable_only) 2320 { 2321 FeatureWordInfo *wi = &feature_word_info[w]; 2322 uint32_t r; 2323 2324 if (kvm_enabled()) { 2325 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax, 2326 wi->cpuid_ecx, 2327 wi->cpuid_reg); 2328 } else if (tcg_enabled()) { 2329 r = wi->tcg_features; 2330 } else { 2331 return ~0; 2332 } 2333 if (migratable_only) { 2334 r &= x86_cpu_get_migratable_flags(w); 2335 } 2336 return r; 2337 } 2338 2339 static void x86_cpu_report_filtered_features(X86CPU *cpu) 2340 { 2341 FeatureWord w; 2342 2343 for (w = 0; w < FEATURE_WORDS; w++) { 2344 report_unavailable_features(w, cpu->filtered_features[w]); 2345 } 2346 } 2347 2348 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 2349 { 2350 PropValue *pv; 2351 for (pv = props; pv->prop; pv++) { 2352 if (!pv->value) { 2353 continue; 2354 } 2355 object_property_parse(OBJECT(cpu), pv->value, 
                              pv->prop,
                              &error_abort);
    }
}

/* Load data from X86CPUDefinition into a X86CPU object.
 * Errors from the individual property sets accumulate into @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, x2apic cannot be offered by KVM */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order */
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}

/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Built once and cached: the property set is constant */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_obj(d, props[i], qnull());
    }

    /* Every named feature flag is a static-expansion property too */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_obj(d, fi->feat_names[bit], qnull());
        }
    }

    return d;
}

/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
2473 */ 2474 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 2475 { 2476 QDict *sprops = x86_cpu_static_props(); 2477 const QDictEntry *e; 2478 2479 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 2480 const char *prop = qdict_entry_key(e); 2481 x86_cpu_expand_prop(cpu, props, prop); 2482 } 2483 } 2484 2485 /* Convert CPU model data from X86CPU object to a property dictionary 2486 * that can recreate exactly the same CPU model, including every 2487 * writeable QOM property. 2488 */ 2489 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 2490 { 2491 ObjectPropertyIterator iter; 2492 ObjectProperty *prop; 2493 2494 object_property_iter_init(&iter, OBJECT(cpu)); 2495 while ((prop = object_property_iter_next(&iter))) { 2496 /* skip read-only or write-only properties */ 2497 if (!prop->get || !prop->set) { 2498 continue; 2499 } 2500 2501 /* "hotplugged" is the only property that is configurable 2502 * on the command-line but will be set differently on CPUs 2503 * created using "-cpu ... -smp ..." and by CPUs created 2504 * on the fly by x86_cpu_from_model() for querying. Skip it. 
2505 */ 2506 if (!strcmp(prop->name, "hotplugged")) { 2507 continue; 2508 } 2509 x86_cpu_expand_prop(cpu, props, prop->name); 2510 } 2511 } 2512 2513 static void object_apply_props(Object *obj, QDict *props, Error **errp) 2514 { 2515 const QDictEntry *prop; 2516 Error *err = NULL; 2517 2518 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 2519 object_property_set_qobject(obj, qdict_entry_value(prop), 2520 qdict_entry_key(prop), &err); 2521 if (err) { 2522 break; 2523 } 2524 } 2525 2526 error_propagate(errp, err); 2527 } 2528 2529 /* Create X86CPU object according to model+props specification */ 2530 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 2531 { 2532 X86CPU *xc = NULL; 2533 X86CPUClass *xcc; 2534 Error *err = NULL; 2535 2536 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 2537 if (xcc == NULL) { 2538 error_setg(&err, "CPU model '%s' not found", model); 2539 goto out; 2540 } 2541 2542 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 2543 if (props) { 2544 object_apply_props(OBJECT(xc), props, &err); 2545 if (err) { 2546 goto out; 2547 } 2548 } 2549 2550 x86_cpu_expand_features(xc, &err); 2551 if (err) { 2552 goto out; 2553 } 2554 2555 out: 2556 if (err) { 2557 error_propagate(errp, err); 2558 object_unref(OBJECT(xc)); 2559 xc = NULL; 2560 } 2561 return xc; 2562 } 2563 2564 CpuModelExpansionInfo * 2565 arch_query_cpu_model_expansion(CpuModelExpansionType type, 2566 CpuModelInfo *model, 2567 Error **errp) 2568 { 2569 X86CPU *xc = NULL; 2570 Error *err = NULL; 2571 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 2572 QDict *props = NULL; 2573 const char *base_name; 2574 2575 xc = x86_cpu_from_model(model->name, 2576 model->has_props ? 
2577 qobject_to_qdict(model->props) : 2578 NULL, &err); 2579 if (err) { 2580 goto out; 2581 } 2582 2583 props = qdict_new(); 2584 2585 switch (type) { 2586 case CPU_MODEL_EXPANSION_TYPE_STATIC: 2587 /* Static expansion will be based on "base" only */ 2588 base_name = "base"; 2589 x86_cpu_to_dict(xc, props); 2590 break; 2591 case CPU_MODEL_EXPANSION_TYPE_FULL: 2592 /* As we don't return every single property, full expansion needs 2593 * to keep the original model name+props, and add extra 2594 * properties on top of that. 2595 */ 2596 base_name = model->name; 2597 x86_cpu_to_dict_full(xc, props); 2598 break; 2599 default: 2600 error_setg(&err, "Unsupportted expansion type"); 2601 goto out; 2602 } 2603 2604 if (!props) { 2605 props = qdict_new(); 2606 } 2607 x86_cpu_to_dict(xc, props); 2608 2609 ret->model = g_new0(CpuModelInfo, 1); 2610 ret->model->name = g_strdup(base_name); 2611 ret->model->props = QOBJECT(props); 2612 ret->model->has_props = true; 2613 2614 out: 2615 object_unref(OBJECT(xc)); 2616 if (err) { 2617 error_propagate(errp, err); 2618 qapi_free_CpuModelExpansionInfo(ret); 2619 ret = NULL; 2620 } 2621 return ret; 2622 } 2623 2624 static gchar *x86_gdb_arch_name(CPUState *cs) 2625 { 2626 #ifdef TARGET_X86_64 2627 return g_strdup("i386:x86-64"); 2628 #else 2629 return g_strdup("i386"); 2630 #endif 2631 } 2632 2633 X86CPU *cpu_x86_init(const char *cpu_model) 2634 { 2635 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model)); 2636 } 2637 2638 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 2639 { 2640 X86CPUDefinition *cpudef = data; 2641 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2642 2643 xcc->cpu_def = cpudef; 2644 xcc->migration_safe = true; 2645 } 2646 2647 static void x86_register_cpudef_type(X86CPUDefinition *def) 2648 { 2649 char *typename = x86_cpu_type_name(def->name); 2650 TypeInfo ti = { 2651 .name = typename, 2652 .parent = TYPE_X86_CPU, 2653 .class_init = x86_cpu_cpudef_class_init, 2654 .class_data = def, 2655 }; 2656 2657 /* 
AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));

    type_register(&ti);
    g_free(typename);
}

#if !defined(CONFIG_USER_ONLY)

/* Remove the APIC bit from CPUID leaf 1 EDX (used when no APIC is present) */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/* Compute the guest-visible CPUID result for leaf @index, subleaf @count.
 * All four output registers are always written.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        /* Centaur/VIA extended range */
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        /* hypervisor range */
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch (index) {
    case 0:
        /* Highest basic leaf + vendor string (EBX:EDX:ECX order) */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC id, feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects the *guest's* CR4 setting, not a static flag */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26 */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                /* Bits 25..14: max logical CPUs sharing this cache, - 1 */
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                /* L3 is shared by the whole package */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors the guest's CR4.PKE, like OSXSAVE above */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* Sub-leaf 0: supported XCR0 bits and total save-area size */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset for each enabled XSAVE component */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Highest extended leaf + vendor string again */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string: 16 bytes per leaf, 48 bytes total */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero the register state up to the marker; fields after
     * end_reset_fields deliberately survive reset.
     */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* Architectural CR0 reset value (paging off, real mode) */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 with EIP 0xfff0 below puts
     * execution at the reset vector.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* DX carries the CPU signature after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU.  */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Only the BSP starts running; APs wait for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* Return true if this CPU's APIC is the bootstrap processor */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Set up machine-check state when family >= 6 and both MCE and MCA
 * feature bits are present.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* NOTE(review): stride of 4 suggests 4 registers per bank,
             * slot 0 presumably MCi_CTL — confirm against mce_banks layout.
             */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation matching the accelerator in use */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create (but do not realize) the local APIC child object for @cpu */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* the child property now holds the long-lived reference */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase =
        APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize the APIC device and, once globally, map its MMIO window */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

/* machine-init-done notifier: alias /machine/smram into this CPU's
 * address space root, above normal memory (priority 1).
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* No APIC device in user-mode emulation */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to value if it is currently lower (monotonic max) */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    /* top nibble selects the CPUID leaf range (basic/extended/VIA) */
    uint32_t region = eax & 0xF0000000;

    /* Nothing to do if no bit of this feature word is enabled */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    /* Component i is enabled iff its controlling feature bit is set */
    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w];
        }
    }

    /* Apply "+feature" requests collected by the featurestr parser */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply "-feature" requests collected by the featurestr parser */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are meaningless (or hidden) otherwise */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        /* Remember which requested bits had to be dropped */
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    return rv;
}

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

/* DeviceClass::realize: expand and filter CPUID features, settle
 * phys_bits, create the APIC and address spaces, then chain to the
 * parent realize.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool
        ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Report (and with "enforce" reject) features the accelerator dropped */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed even without the CPUID bit when SMP */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *as_normal = address_space_init_shareable(cs->memory,
                                                               "cpu-memory");
        AddressSpace *as_smm = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(as_smm, cpu->cpu_as_root, "CPU");

        cs->num_ases = 2;
        cpu_address_space_init(cs, as_normal, 0);
        cpu_address_space_init(cs, as_smm, 1);

        /* ... SMRAM with higher priority, linked from /machine/smram.
         */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* DeviceClass::unrealize: stop the vCPU, drop the reset hook and APIC */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* One (FeatureWord, bitmask) pair backing a QOM boolean feature property */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

/* QOM getter: true iff all bits in the property's mask are set */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: set/clear the mask bits; rejected after realize */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    /* Record that these bits were set explicitly by the user */
    cpu->env.user_features[fp->w] |= fp->mask;
}

/* Release hook: free the BitProperty allocated at registration time */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Property already exists: just widen its mask */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

/* Register the QOM property for one feature bit, if it has a name */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

/* Build Hyper-V guest-crash information from the crash parameter MSRs,
 * or return NULL when the crash MSRs are not exposed to the guest.
 */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_X64_MSR_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 =
env->msr_hv_crash_params[3]; 3896 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 3897 } 3898 3899 return panic_info; 3900 } 3901 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 3902 const char *name, void *opaque, 3903 Error **errp) 3904 { 3905 CPUState *cs = CPU(obj); 3906 GuestPanicInformation *panic_info; 3907 3908 if (!cs->crash_occurred) { 3909 error_setg(errp, "No crash occured"); 3910 return; 3911 } 3912 3913 panic_info = x86_cpu_get_crash_info(cs); 3914 if (panic_info == NULL) { 3915 error_setg(errp, "No crash information"); 3916 return; 3917 } 3918 3919 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 3920 errp); 3921 qapi_free_GuestPanicInformation(panic_info); 3922 } 3923 3924 static void x86_cpu_initfn(Object *obj) 3925 { 3926 CPUState *cs = CPU(obj); 3927 X86CPU *cpu = X86_CPU(obj); 3928 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 3929 CPUX86State *env = &cpu->env; 3930 FeatureWord w; 3931 3932 cs->env_ptr = env; 3933 3934 object_property_add(obj, "family", "int", 3935 x86_cpuid_version_get_family, 3936 x86_cpuid_version_set_family, NULL, NULL, NULL); 3937 object_property_add(obj, "model", "int", 3938 x86_cpuid_version_get_model, 3939 x86_cpuid_version_set_model, NULL, NULL, NULL); 3940 object_property_add(obj, "stepping", "int", 3941 x86_cpuid_version_get_stepping, 3942 x86_cpuid_version_set_stepping, NULL, NULL, NULL); 3943 object_property_add_str(obj, "vendor", 3944 x86_cpuid_get_vendor, 3945 x86_cpuid_set_vendor, NULL); 3946 object_property_add_str(obj, "model-id", 3947 x86_cpuid_get_model_id, 3948 x86_cpuid_set_model_id, NULL); 3949 object_property_add(obj, "tsc-frequency", "int", 3950 x86_cpuid_get_tsc_freq, 3951 x86_cpuid_set_tsc_freq, NULL, NULL, NULL); 3952 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 3953 x86_cpu_get_feature_words, 3954 NULL, NULL, (void *)env->features, NULL); 3955 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 3956 
x86_cpu_get_feature_words, 3957 NULL, NULL, (void *)cpu->filtered_features, NULL); 3958 3959 object_property_add(obj, "crash-information", "GuestPanicInformation", 3960 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL); 3961 3962 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; 3963 3964 for (w = 0; w < FEATURE_WORDS; w++) { 3965 int bitnr; 3966 3967 for (bitnr = 0; bitnr < 32; bitnr++) { 3968 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 3969 } 3970 } 3971 3972 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort); 3973 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort); 3974 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort); 3975 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort); 3976 object_property_add_alias(obj, "xd", obj, "nx", &error_abort); 3977 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort); 3978 object_property_add_alias(obj, "i64", obj, "lm", &error_abort); 3979 3980 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort); 3981 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort); 3982 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort); 3983 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort); 3984 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort); 3985 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort); 3986 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort); 3987 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort); 3988 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort); 3989 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort); 3990 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort); 3991 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", 
&error_abort); 3992 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); 3993 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); 3994 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); 3995 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); 3996 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); 3997 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort); 3998 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort); 3999 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort); 4000 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort); 4001 4002 if (xcc->cpu_def) { 4003 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); 4004 } 4005 } 4006 4007 static int64_t x86_cpu_get_arch_id(CPUState *cs) 4008 { 4009 X86CPU *cpu = X86_CPU(cs); 4010 4011 return cpu->apic_id; 4012 } 4013 4014 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 4015 { 4016 X86CPU *cpu = X86_CPU(cs); 4017 4018 return cpu->env.cr[0] & CR0_PG_MASK; 4019 } 4020 4021 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 4022 { 4023 X86CPU *cpu = X86_CPU(cs); 4024 4025 cpu->env.eip = value; 4026 } 4027 4028 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 4029 { 4030 X86CPU *cpu = X86_CPU(cs); 4031 4032 cpu->env.eip = tb->pc - tb->cs_base; 4033 } 4034 4035 static bool x86_cpu_has_work(CPUState *cs) 4036 { 4037 X86CPU *cpu = X86_CPU(cs); 4038 CPUX86State *env = &cpu->env; 4039 4040 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD | 4041 CPU_INTERRUPT_POLL)) && 4042 (env->eflags & IF_MASK)) || 4043 (cs->interrupt_request & (CPU_INTERRUPT_NMI | 4044 CPU_INTERRUPT_INIT | 4045 CPU_INTERRUPT_SIPI | 4046 CPU_INTERRUPT_MCE)) || 4047 ((cs->interrupt_request & CPU_INTERRUPT_SMI) && 4048 !(env->hflags & HF_SMM_MASK)); 4049 } 4050 4051 static Property 
x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* For system emulation the topology IDs start out unassigned and are
     * expected to be filled in later (board/realize code). */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* "hv-spinlocks" takes a custom property info because it parses a
     * numeric retry count rather than a plain bool. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments, all disabled unless requested explicitly: */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* CPUID feature checking/enforcement against the accelerator: */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set by the user"; min-* floors default to 0. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_END_OF_LIST()
};

/*
 * Class init for all X86CPU subtypes: installs the x86 implementations of
 * the DeviceClass/CPUClass hooks, saving the parent handlers so the x86
 * realize/unrealize/reset wrappers can chain to them.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save parent hooks before overriding so ours can call up the chain. */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    /* Interrupt delivery hooks are only needed when TCG is compiled in. */
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id =
x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation-only hooks: memory mapping, debug physical address
     * translation, crash-dump ELF notes and the migration state. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    /* gdbstub register description depends on the target word size. */
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /* x86 CPUs can be hot-plugged with device_add. */
    dc->user_creatable = true;
}

/* Abstract base type; concrete CPU model types derive from it. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};


/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Static and migration-safe: suitable as the anchor for "static"
     * expansion of CPU models over QMP. */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* NOTE(review): ordering appears to control listing position in
     * query-cpu-definitions output — confirm against the ordering users. */
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

static void
x86_cpu_register_types(void) 4189 { 4190 int i; 4191 4192 type_register_static(&x86_cpu_type_info); 4193 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 4194 x86_register_cpudef_type(&builtin_x86_defs[i]); 4195 } 4196 type_register_static(&max_x86_cpu_type_info); 4197 type_register_static(&x86_base_cpu_type_info); 4198 #ifdef CONFIG_KVM 4199 type_register_static(&host_x86_cpu_type_info); 4200 #endif 4201 } 4202 4203 type_init(x86_cpu_register_types) 4204