1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/cutils.h" 21 22 #include "cpu.h" 23 #include "exec/exec-all.h" 24 #include "sysemu/kvm.h" 25 #include "sysemu/cpus.h" 26 #include "kvm_i386.h" 27 28 #include "qemu/error-report.h" 29 #include "qemu/option.h" 30 #include "qemu/config-file.h" 31 #include "qapi/qmp/qerror.h" 32 #include "qapi/qmp/types.h" 33 34 #include "qapi-types.h" 35 #include "qapi-visit.h" 36 #include "qapi/visitor.h" 37 #include "qom/qom-qobject.h" 38 #include "sysemu/arch_init.h" 39 40 #if defined(CONFIG_KVM) 41 #include <linux/kvm_para.h> 42 #endif 43 44 #include "sysemu/sysemu.h" 45 #include "hw/qdev-properties.h" 46 #include "hw/i386/topology.h" 47 #ifndef CONFIG_USER_ONLY 48 #include "exec/address-spaces.h" 49 #include "hw/hw.h" 50 #include "hw/xen/xen.h" 51 #include "hw/i386/apic_internal.h" 52 #endif 53 54 55 /* Cache topology CPUID constants: */ 56 57 /* CPUID Leaf 2 Descriptors */ 58 59 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c 60 #define CPUID_2_L1I_32KB_8WAY_64B 0x30 61 #define CPUID_2_L2_2MB_8WAY_64B 0x7d 62 #define CPUID_2_L3_16MB_16WAY_64B 0x4d 63 64 65 /* CPUID Leaf 4 constants: */ 66 67 /* EAX: */ 68 #define CPUID_4_TYPE_DCACHE 1 69 #define CPUID_4_TYPE_ICACHE 2 70 #define 
CPUID_4_TYPE_UNIFIED 3 71 72 #define CPUID_4_LEVEL(l) ((l) << 5) 73 74 #define CPUID_4_SELF_INIT_LEVEL (1 << 8) 75 #define CPUID_4_FULLY_ASSOC (1 << 9) 76 77 /* EDX: */ 78 #define CPUID_4_NO_INVD_SHARING (1 << 0) 79 #define CPUID_4_INCLUSIVE (1 << 1) 80 #define CPUID_4_COMPLEX_IDX (1 << 2) 81 82 #define ASSOC_FULL 0xFF 83 84 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 85 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 86 a == 2 ? 0x2 : \ 87 a == 4 ? 0x4 : \ 88 a == 8 ? 0x6 : \ 89 a == 16 ? 0x8 : \ 90 a == 32 ? 0xA : \ 91 a == 48 ? 0xB : \ 92 a == 64 ? 0xC : \ 93 a == 96 ? 0xD : \ 94 a == 128 ? 0xE : \ 95 a == ASSOC_FULL ? 0xF : \ 96 0 /* invalid value */) 97 98 99 /* Definitions of the hardcoded cache entries we expose: */ 100 101 /* L1 data cache: */ 102 #define L1D_LINE_SIZE 64 103 #define L1D_ASSOCIATIVITY 8 104 #define L1D_SETS 64 105 #define L1D_PARTITIONS 1 106 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */ 107 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B 108 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 109 #define L1D_LINES_PER_TAG 1 110 #define L1D_SIZE_KB_AMD 64 111 #define L1D_ASSOCIATIVITY_AMD 2 112 113 /* L1 instruction cache: */ 114 #define L1I_LINE_SIZE 64 115 #define L1I_ASSOCIATIVITY 8 116 #define L1I_SETS 64 117 #define L1I_PARTITIONS 1 118 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */ 119 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B 120 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 121 #define L1I_LINES_PER_TAG 1 122 #define L1I_SIZE_KB_AMD 64 123 #define L1I_ASSOCIATIVITY_AMD 2 124 125 /* Level 2 unified cache: */ 126 #define L2_LINE_SIZE 64 127 #define L2_ASSOCIATIVITY 16 128 #define L2_SETS 4096 129 #define L2_PARTITIONS 1 130 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */ 131 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 132 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B 133 /*FIXME: CPUID leaf 0x80000006 is 
inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
/* L3_N_*: values used when an L3 cache is actually exposed */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512


/* Pack three 32-bit CPUID vendor words into @dst as a 12-character
 * string, least-significant byte of each word first.  Callers pass the
 * EBX, EDX, ECX outputs of CPUID leaf 0 (see host_vendor_fms()).
 * @dst must have room for CPUID_VENDOR_SZ + 1 bytes; it is always
 * NUL-terminated at index CPUID_VENDOR_SZ.
 */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}

/* Cumulative CPUID[1].EDX feature sets for the classic CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE |
CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* TCG_*_FEATURES: per-feature-word masks of the flags that the TCG
 * (software emulation) backend can actually implement.  Referenced by
 * the tcg_features field of feature_word_info below. */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

/* Metadata describing one 32-bit CPUID feature word: the CPUID leaf
 * (and optional subleaf) it comes from, the output register it lives
 * in, flag names, and migratability/TCG-support masks. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;

/* Per-feature-word metadata table, indexed by FeatureWord.  Each
 * feat_names[] array has exactly 32 entries (bit 0 first); a NULL name
 * means the bit has no user-visible property name. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): bits 0 and 3 intentionally share the name
             * "kvmclock" — presumably the two KVM clocksource interfaces;
             * confirm against linux/kvm_para.h. */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* Low/high halves of the 64-bit XSAVE component bitmap
     * (CPUID[EAX=0xD,ECX=0].EAX/EDX); no per-bit names here. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Table mapping R_* register indexes to their names and QAPI enum
 * values; used by get_register_name_32(). */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/* One XSAVE state component: the feature bit that enables it, and its
 * offset/size within the standard-format XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT.  Offsets/sizes are
 * taken from QEMU's X86XSaveArea layout, not queried from hardware. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/* Return the size in bytes of the XSAVE area needed to hold all the
 * state components whose bits are set in @mask: the maximum of
 * offset + size over the selected x86_ext_save_areas entries. */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0;
i < ARRAY_SIZE(x86_ext_save_areas); i++) { 608 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 609 if ((mask >> i) & 1) { 610 ret = MAX(ret, esa->offset + esa->size); 611 } 612 } 613 return ret; 614 } 615 616 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 617 { 618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 619 cpu->env.features[FEAT_XSAVE_COMP_LO]; 620 } 621 622 const char *get_register_name_32(unsigned int reg) 623 { 624 if (reg >= CPU_NB_REGS32) { 625 return NULL; 626 } 627 return x86_reg_info_32[reg].name; 628 } 629 630 /* 631 * Returns the set of feature flags that are supported and migratable by 632 * QEMU, for a given FeatureWord. 633 */ 634 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w) 635 { 636 FeatureWordInfo *wi = &feature_word_info[w]; 637 uint32_t r = 0; 638 int i; 639 640 for (i = 0; i < 32; i++) { 641 uint32_t f = 1U << i; 642 643 /* If the feature name is known, it is implicitly considered migratable, 644 * unless it is explicitly set in unmigratable_flags */ 645 if ((wi->migratable_flags & f) || 646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 647 r |= f; 648 } 649 } 650 return r; 651 } 652 653 void host_cpuid(uint32_t function, uint32_t count, 654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 655 { 656 uint32_t vec[4]; 657 658 #ifdef __x86_64__ 659 asm volatile("cpuid" 660 : "=a"(vec[0]), "=b"(vec[1]), 661 "=c"(vec[2]), "=d"(vec[3]) 662 : "0"(function), "c"(count) : "cc"); 663 #elif defined(__i386__) 664 asm volatile("pusha \n\t" 665 "cpuid \n\t" 666 "mov %%eax, 0(%2) \n\t" 667 "mov %%ebx, 4(%2) \n\t" 668 "mov %%ecx, 8(%2) \n\t" 669 "mov %%edx, 12(%2) \n\t" 670 "popa" 671 : : "a"(function), "c"(count), "S"(vec) 672 : "memory", "cc"); 673 #else 674 abort(); 675 #endif 676 677 if (eax) 678 *eax = vec[0]; 679 if (ebx) 680 *ebx = vec[1]; 681 if (ecx) 682 *ecx = vec[2]; 683 if (edx) 684 *edx = vec[3]; 685 } 686 687 void host_vendor_fms(char *vendor, int *family, int *model, int 
*stepping)
{
    uint32_t eax, ebx, ecx, edx;

    /* Vendor string comes from CPUID leaf 0 (EBX, EDX, ECX). */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    /* Family/model/stepping come from CPUID leaf 1 EAX; each output
     * pointer may be NULL if the caller does not want that value. */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family + extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model | (extended model << 4) */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM ObjectClass for CPU model @cpu_model; returns NULL
 * when @cpu_model is NULL or no matching class is registered. */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/* Inverse of x86_cpu_type_name(): strip the "-<TYPE_X86_CPU>" suffix
 * from the class name.  Caller frees the returned string. */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* Static description of one built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
};

static X86CPUDefinition builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 766 CPUID_PSE36, 767 .features[FEAT_1_ECX] = 768 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 769 .features[FEAT_8000_0001_EDX] = 770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 771 .features[FEAT_8000_0001_ECX] = 772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 773 .xlevel = 0x8000000A, 774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 775 }, 776 { 777 .name = "phenom", 778 .level = 5, 779 .vendor = CPUID_VENDOR_AMD, 780 .family = 16, 781 .model = 2, 782 .stepping = 3, 783 /* Missing: CPUID_HT */ 784 .features[FEAT_1_EDX] = 785 PPRO_FEATURES | 786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 787 CPUID_PSE36 | CPUID_VME, 788 .features[FEAT_1_ECX] = 789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 790 CPUID_EXT_POPCNT, 791 .features[FEAT_8000_0001_EDX] = 792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 796 CPUID_EXT3_CR8LEG, 797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 799 .features[FEAT_8000_0001_ECX] = 800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 802 /* Missing: CPUID_SVM_LBRV */ 803 .features[FEAT_SVM] = 804 CPUID_SVM_NPT, 805 .xlevel = 0x8000001A, 806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 807 }, 808 { 809 .name = "core2duo", 810 .level = 10, 811 .vendor = CPUID_VENDOR_INTEL, 812 .family = 6, 813 .model = 15, 814 .stepping = 11, 815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 816 .features[FEAT_1_EDX] = 817 PPRO_FEATURES | 818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 822 .features[FEAT_1_ECX] = 823 CPUID_EXT_SSE3 | 
CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 824 CPUID_EXT_CX16, 825 .features[FEAT_8000_0001_EDX] = 826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 827 .features[FEAT_8000_0001_ECX] = 828 CPUID_EXT3_LAHF_LM, 829 .xlevel = 0x80000008, 830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 831 }, 832 { 833 .name = "kvm64", 834 .level = 0xd, 835 .vendor = CPUID_VENDOR_INTEL, 836 .family = 15, 837 .model = 6, 838 .stepping = 1, 839 /* Missing: CPUID_HT */ 840 .features[FEAT_1_EDX] = 841 PPRO_FEATURES | CPUID_VME | 842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 843 CPUID_PSE36, 844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 845 .features[FEAT_1_ECX] = 846 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 848 .features[FEAT_8000_0001_EDX] = 849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 854 .features[FEAT_8000_0001_ECX] = 855 0, 856 .xlevel = 0x80000008, 857 .model_id = "Common KVM processor" 858 }, 859 { 860 .name = "qemu32", 861 .level = 4, 862 .vendor = CPUID_VENDOR_INTEL, 863 .family = 6, 864 .model = 6, 865 .stepping = 3, 866 .features[FEAT_1_EDX] = 867 PPRO_FEATURES, 868 .features[FEAT_1_ECX] = 869 CPUID_EXT_SSE3, 870 .xlevel = 0x80000004, 871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 872 }, 873 { 874 .name = "kvm32", 875 .level = 5, 876 .vendor = CPUID_VENDOR_INTEL, 877 .family = 15, 878 .model = 6, 879 .stepping = 1, 880 .features[FEAT_1_EDX] = 881 PPRO_FEATURES | CPUID_VME | 882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 883 .features[FEAT_1_ECX] = 884 CPUID_EXT_SSE3, 885 .features[FEAT_8000_0001_ECX] = 886 0, 887 .xlevel = 0x80000008, 888 .model_id = "Common 32-bit KVM processor" 889 }, 890 { 891 .name = "coreduo", 892 .level = 10, 893 
.vendor = CPUID_VENDOR_INTEL, 894 .family = 6, 895 .model = 14, 896 .stepping = 8, 897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 898 .features[FEAT_1_EDX] = 899 PPRO_FEATURES | CPUID_VME | 900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 901 CPUID_SS, 902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 904 .features[FEAT_1_ECX] = 905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 906 .features[FEAT_8000_0001_EDX] = 907 CPUID_EXT2_NX, 908 .xlevel = 0x80000008, 909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 910 }, 911 { 912 .name = "486", 913 .level = 1, 914 .vendor = CPUID_VENDOR_INTEL, 915 .family = 4, 916 .model = 8, 917 .stepping = 0, 918 .features[FEAT_1_EDX] = 919 I486_FEATURES, 920 .xlevel = 0, 921 }, 922 { 923 .name = "pentium", 924 .level = 1, 925 .vendor = CPUID_VENDOR_INTEL, 926 .family = 5, 927 .model = 4, 928 .stepping = 3, 929 .features[FEAT_1_EDX] = 930 PENTIUM_FEATURES, 931 .xlevel = 0, 932 }, 933 { 934 .name = "pentium2", 935 .level = 2, 936 .vendor = CPUID_VENDOR_INTEL, 937 .family = 6, 938 .model = 5, 939 .stepping = 2, 940 .features[FEAT_1_EDX] = 941 PENTIUM2_FEATURES, 942 .xlevel = 0, 943 }, 944 { 945 .name = "pentium3", 946 .level = 3, 947 .vendor = CPUID_VENDOR_INTEL, 948 .family = 6, 949 .model = 7, 950 .stepping = 3, 951 .features[FEAT_1_EDX] = 952 PENTIUM3_FEATURES, 953 .xlevel = 0, 954 }, 955 { 956 .name = "athlon", 957 .level = 2, 958 .vendor = CPUID_VENDOR_AMD, 959 .family = 6, 960 .model = 2, 961 .stepping = 3, 962 .features[FEAT_1_EDX] = 963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 964 CPUID_MCA, 965 .features[FEAT_8000_0001_EDX] = 966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 967 .xlevel = 0x80000008, 968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 969 }, 970 { 971 .name = "n270", 972 .level = 10, 973 .vendor = CPUID_VENDOR_INTEL, 974 .family = 6, 975 .model = 28, 976 .stepping = 2, 977 /* Missing: CPUID_DTS, CPUID_HT, 
CPUID_TM, CPUID_PBE */ 978 .features[FEAT_1_EDX] = 979 PPRO_FEATURES | 980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 981 CPUID_ACPI | CPUID_SS, 982 /* Some CPUs got no CPUID_SEP */ 983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 984 * CPUID_EXT_XTPR */ 985 .features[FEAT_1_ECX] = 986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 987 CPUID_EXT_MOVBE, 988 .features[FEAT_8000_0001_EDX] = 989 CPUID_EXT2_NX, 990 .features[FEAT_8000_0001_ECX] = 991 CPUID_EXT3_LAHF_LM, 992 .xlevel = 0x80000008, 993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 994 }, 995 { 996 .name = "Conroe", 997 .level = 10, 998 .vendor = CPUID_VENDOR_INTEL, 999 .family = 6, 1000 .model = 15, 1001 .stepping = 3, 1002 .features[FEAT_1_EDX] = 1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1007 CPUID_DE | CPUID_FP87, 1008 .features[FEAT_1_ECX] = 1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1010 .features[FEAT_8000_0001_EDX] = 1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1012 .features[FEAT_8000_0001_ECX] = 1013 CPUID_EXT3_LAHF_LM, 1014 .xlevel = 0x80000008, 1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1016 }, 1017 { 1018 .name = "Penryn", 1019 .level = 10, 1020 .vendor = CPUID_VENDOR_INTEL, 1021 .family = 6, 1022 .model = 23, 1023 .stepping = 3, 1024 .features[FEAT_1_EDX] = 1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1029 CPUID_DE | CPUID_FP87, 1030 .features[FEAT_1_ECX] = 1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1032 CPUID_EXT_SSE3, 1033 .features[FEAT_8000_0001_EDX] = 1034 
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1035 .features[FEAT_8000_0001_ECX] = 1036 CPUID_EXT3_LAHF_LM, 1037 .xlevel = 0x80000008, 1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1039 }, 1040 { 1041 .name = "Nehalem", 1042 .level = 11, 1043 .vendor = CPUID_VENDOR_INTEL, 1044 .family = 6, 1045 .model = 26, 1046 .stepping = 3, 1047 .features[FEAT_1_EDX] = 1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1052 CPUID_DE | CPUID_FP87, 1053 .features[FEAT_1_ECX] = 1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1056 .features[FEAT_8000_0001_EDX] = 1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1058 .features[FEAT_8000_0001_ECX] = 1059 CPUID_EXT3_LAHF_LM, 1060 .xlevel = 0x80000008, 1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1062 }, 1063 { 1064 .name = "Westmere", 1065 .level = 11, 1066 .vendor = CPUID_VENDOR_INTEL, 1067 .family = 6, 1068 .model = 44, 1069 .stepping = 1, 1070 .features[FEAT_1_EDX] = 1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1075 CPUID_DE | CPUID_FP87, 1076 .features[FEAT_1_ECX] = 1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1080 .features[FEAT_8000_0001_EDX] = 1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1082 .features[FEAT_8000_0001_ECX] = 1083 CPUID_EXT3_LAHF_LM, 1084 .features[FEAT_6_EAX] = 1085 CPUID_6_EAX_ARAT, 1086 .xlevel = 0x80000008, 1087 .model_id = "Westmere 
E56xx/L56xx/X56xx (Nehalem-C)", 1088 }, 1089 { 1090 .name = "SandyBridge", 1091 .level = 0xd, 1092 .vendor = CPUID_VENDOR_INTEL, 1093 .family = 6, 1094 .model = 42, 1095 .stepping = 1, 1096 .features[FEAT_1_EDX] = 1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1101 CPUID_DE | CPUID_FP87, 1102 .features[FEAT_1_ECX] = 1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1107 CPUID_EXT_SSE3, 1108 .features[FEAT_8000_0001_EDX] = 1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1110 CPUID_EXT2_SYSCALL, 1111 .features[FEAT_8000_0001_ECX] = 1112 CPUID_EXT3_LAHF_LM, 1113 .features[FEAT_XSAVE] = 1114 CPUID_XSAVE_XSAVEOPT, 1115 .features[FEAT_6_EAX] = 1116 CPUID_6_EAX_ARAT, 1117 .xlevel = 0x80000008, 1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1119 }, 1120 { 1121 .name = "IvyBridge", 1122 .level = 0xd, 1123 .vendor = CPUID_VENDOR_INTEL, 1124 .family = 6, 1125 .model = 58, 1126 .stepping = 9, 1127 .features[FEAT_1_EDX] = 1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1132 CPUID_DE | CPUID_FP87, 1133 .features[FEAT_1_ECX] = 1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1139 .features[FEAT_7_0_EBX] = 1140 
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1141 CPUID_7_0_EBX_ERMS, 1142 .features[FEAT_8000_0001_EDX] = 1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1144 CPUID_EXT2_SYSCALL, 1145 .features[FEAT_8000_0001_ECX] = 1146 CPUID_EXT3_LAHF_LM, 1147 .features[FEAT_XSAVE] = 1148 CPUID_XSAVE_XSAVEOPT, 1149 .features[FEAT_6_EAX] = 1150 CPUID_6_EAX_ARAT, 1151 .xlevel = 0x80000008, 1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1153 }, 1154 { 1155 .name = "Haswell-noTSX", 1156 .level = 0xd, 1157 .vendor = CPUID_VENDOR_INTEL, 1158 .family = 6, 1159 .model = 60, 1160 .stepping = 1, 1161 .features[FEAT_1_EDX] = 1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1166 CPUID_DE | CPUID_FP87, 1167 .features[FEAT_1_ECX] = 1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1174 .features[FEAT_8000_0001_EDX] = 1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1176 CPUID_EXT2_SYSCALL, 1177 .features[FEAT_8000_0001_ECX] = 1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1179 .features[FEAT_7_0_EBX] = 1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 1183 .features[FEAT_XSAVE] = 1184 CPUID_XSAVE_XSAVEOPT, 1185 .features[FEAT_6_EAX] = 1186 CPUID_6_EAX_ARAT, 1187 .xlevel = 0x80000008, 1188 .model_id = "Intel Core Processor (Haswell, no TSX)", 1189 }, { 1190 .name = "Haswell", 1191 .level = 0xd, 1192 .vendor = CPUID_VENDOR_INTEL, 1193 .family = 6, 
1194 .model = 60, 1195 .stepping = 4, 1196 .features[FEAT_1_EDX] = 1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1201 CPUID_DE | CPUID_FP87, 1202 .features[FEAT_1_ECX] = 1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1209 .features[FEAT_8000_0001_EDX] = 1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1211 CPUID_EXT2_SYSCALL, 1212 .features[FEAT_8000_0001_ECX] = 1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1214 .features[FEAT_7_0_EBX] = 1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1218 CPUID_7_0_EBX_RTM, 1219 .features[FEAT_XSAVE] = 1220 CPUID_XSAVE_XSAVEOPT, 1221 .features[FEAT_6_EAX] = 1222 CPUID_6_EAX_ARAT, 1223 .xlevel = 0x80000008, 1224 .model_id = "Intel Core Processor (Haswell)", 1225 }, 1226 { 1227 .name = "Broadwell-noTSX", 1228 .level = 0xd, 1229 .vendor = CPUID_VENDOR_INTEL, 1230 .family = 6, 1231 .model = 61, 1232 .stepping = 2, 1233 .features[FEAT_1_EDX] = 1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1238 CPUID_DE | CPUID_FP87, 1239 .features[FEAT_1_ECX] = 1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1246 .features[FEAT_8000_0001_EDX] = 1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1248 CPUID_EXT2_SYSCALL, 1249 .features[FEAT_8000_0001_ECX] = 1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1251 .features[FEAT_7_0_EBX] = 1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1256 CPUID_7_0_EBX_SMAP, 1257 .features[FEAT_XSAVE] = 1258 CPUID_XSAVE_XSAVEOPT, 1259 .features[FEAT_6_EAX] = 1260 CPUID_6_EAX_ARAT, 1261 .xlevel = 0x80000008, 1262 .model_id = "Intel Core Processor (Broadwell, no TSX)", 1263 }, 1264 { 1265 .name = "Broadwell", 1266 .level = 0xd, 1267 .vendor = CPUID_VENDOR_INTEL, 1268 .family = 6, 1269 .model = 61, 1270 .stepping = 2, 1271 .features[FEAT_1_EDX] = 1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1276 CPUID_DE | CPUID_FP87, 1277 .features[FEAT_1_ECX] = 1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1284 .features[FEAT_8000_0001_EDX] = 1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1286 CPUID_EXT2_SYSCALL, 1287 .features[FEAT_8000_0001_ECX] = 1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | 
CPUID_EXT3_3DNOWPREFETCH, 1289 .features[FEAT_7_0_EBX] = 1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1294 CPUID_7_0_EBX_SMAP, 1295 .features[FEAT_XSAVE] = 1296 CPUID_XSAVE_XSAVEOPT, 1297 .features[FEAT_6_EAX] = 1298 CPUID_6_EAX_ARAT, 1299 .xlevel = 0x80000008, 1300 .model_id = "Intel Core Processor (Broadwell)", 1301 }, 1302 { 1303 .name = "Skylake-Client", 1304 .level = 0xd, 1305 .vendor = CPUID_VENDOR_INTEL, 1306 .family = 6, 1307 .model = 94, 1308 .stepping = 3, 1309 .features[FEAT_1_EDX] = 1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1314 CPUID_DE | CPUID_FP87, 1315 .features[FEAT_1_ECX] = 1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1322 .features[FEAT_8000_0001_EDX] = 1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1324 CPUID_EXT2_SYSCALL, 1325 .features[FEAT_8000_0001_ECX] = 1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1327 .features[FEAT_7_0_EBX] = 1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, 1333 /* Missing: XSAVES (not supported by some Linux 
versions, 1334 * including v4.1 to v4.12). 1335 * KVM doesn't yet expose any XSAVES state save component, 1336 * and the only one defined in Skylake (processor tracing) 1337 * probably will block migration anyway. 1338 */ 1339 .features[FEAT_XSAVE] = 1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1341 CPUID_XSAVE_XGETBV1, 1342 .features[FEAT_6_EAX] = 1343 CPUID_6_EAX_ARAT, 1344 .xlevel = 0x80000008, 1345 .model_id = "Intel Core Processor (Skylake)", 1346 }, 1347 { 1348 .name = "Skylake-Server", 1349 .level = 0xd, 1350 .vendor = CPUID_VENDOR_INTEL, 1351 .family = 6, 1352 .model = 85, 1353 .stepping = 4, 1354 .features[FEAT_1_EDX] = 1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1359 CPUID_DE | CPUID_FP87, 1360 .features[FEAT_1_ECX] = 1361 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1362 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1363 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1364 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1365 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1366 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1367 .features[FEAT_8000_0001_EDX] = 1368 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 1369 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1370 .features[FEAT_8000_0001_ECX] = 1371 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1372 .features[FEAT_7_0_EBX] = 1373 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1374 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1375 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1376 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1377 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 1378 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 1379 CPUID_7_0_EBX_AVX512BW | 
CPUID_7_0_EBX_AVX512CD | 1380 CPUID_7_0_EBX_AVX512VL, 1381 /* Missing: XSAVES (not supported by some Linux versions, 1382 * including v4.1 to v4.12). 1383 * KVM doesn't yet expose any XSAVES state save component, 1384 * and the only one defined in Skylake (processor tracing) 1385 * probably will block migration anyway. 1386 */ 1387 .features[FEAT_XSAVE] = 1388 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1389 CPUID_XSAVE_XGETBV1, 1390 .features[FEAT_6_EAX] = 1391 CPUID_6_EAX_ARAT, 1392 .xlevel = 0x80000008, 1393 .model_id = "Intel Xeon Processor (Skylake)", 1394 }, 1395 { 1396 .name = "Opteron_G1", 1397 .level = 5, 1398 .vendor = CPUID_VENDOR_AMD, 1399 .family = 15, 1400 .model = 6, 1401 .stepping = 1, 1402 .features[FEAT_1_EDX] = 1403 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1404 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1405 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1406 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1407 CPUID_DE | CPUID_FP87, 1408 .features[FEAT_1_ECX] = 1409 CPUID_EXT_SSE3, 1410 .features[FEAT_8000_0001_EDX] = 1411 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1412 .xlevel = 0x80000008, 1413 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 1414 }, 1415 { 1416 .name = "Opteron_G2", 1417 .level = 5, 1418 .vendor = CPUID_VENDOR_AMD, 1419 .family = 15, 1420 .model = 6, 1421 .stepping = 1, 1422 .features[FEAT_1_EDX] = 1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1427 CPUID_DE | CPUID_FP87, 1428 .features[FEAT_1_ECX] = 1429 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 1430 /* Missing: CPUID_EXT2_RDTSCP */ 1431 .features[FEAT_8000_0001_EDX] = 1432 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1433 .features[FEAT_8000_0001_ECX] = 1434 CPUID_EXT3_SVM 
| CPUID_EXT3_LAHF_LM, 1435 .xlevel = 0x80000008, 1436 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 1437 }, 1438 { 1439 .name = "Opteron_G3", 1440 .level = 5, 1441 .vendor = CPUID_VENDOR_AMD, 1442 .family = 16, 1443 .model = 2, 1444 .stepping = 3, 1445 .features[FEAT_1_EDX] = 1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1450 CPUID_DE | CPUID_FP87, 1451 .features[FEAT_1_ECX] = 1452 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 1453 CPUID_EXT_SSE3, 1454 /* Missing: CPUID_EXT2_RDTSCP */ 1455 .features[FEAT_8000_0001_EDX] = 1456 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1457 .features[FEAT_8000_0001_ECX] = 1458 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 1459 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1460 .xlevel = 0x80000008, 1461 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 1462 }, 1463 { 1464 .name = "Opteron_G4", 1465 .level = 0xd, 1466 .vendor = CPUID_VENDOR_AMD, 1467 .family = 21, 1468 .model = 1, 1469 .stepping = 2, 1470 .features[FEAT_1_EDX] = 1471 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1472 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1473 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1474 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1475 CPUID_DE | CPUID_FP87, 1476 .features[FEAT_1_ECX] = 1477 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1478 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1479 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1480 CPUID_EXT_SSE3, 1481 /* Missing: CPUID_EXT2_RDTSCP */ 1482 .features[FEAT_8000_0001_EDX] = 1483 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 1484 CPUID_EXT2_SYSCALL, 1485 .features[FEAT_8000_0001_ECX] = 1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 1489 CPUID_EXT3_LAHF_LM, 1490 /* no xsaveopt! */ 1491 .xlevel = 0x8000001A, 1492 .model_id = "AMD Opteron 62xx class CPU", 1493 }, 1494 { 1495 .name = "Opteron_G5", 1496 .level = 0xd, 1497 .vendor = CPUID_VENDOR_AMD, 1498 .family = 21, 1499 .model = 2, 1500 .stepping = 0, 1501 .features[FEAT_1_EDX] = 1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1506 CPUID_DE | CPUID_FP87, 1507 .features[FEAT_1_ECX] = 1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1512 /* Missing: CPUID_EXT2_RDTSCP */ 1513 .features[FEAT_8000_0001_EDX] = 1514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 1515 CPUID_EXT2_SYSCALL, 1516 .features[FEAT_8000_0001_ECX] = 1517 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 1518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 1519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 1520 CPUID_EXT3_LAHF_LM, 1521 /* no xsaveopt! 
*/ 1522 .xlevel = 0x8000001A, 1523 .model_id = "AMD Opteron 63xx class CPU", 1524 }, 1525 { 1526 .name = "EPYC", 1527 .level = 0xd, 1528 .vendor = CPUID_VENDOR_AMD, 1529 .family = 23, 1530 .model = 1, 1531 .stepping = 2, 1532 .features[FEAT_1_EDX] = 1533 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 1534 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 1535 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 1536 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 1537 CPUID_VME | CPUID_FP87, 1538 .features[FEAT_1_ECX] = 1539 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 1540 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 1541 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1542 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 1543 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1544 .features[FEAT_8000_0001_EDX] = 1545 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 1546 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 1547 CPUID_EXT2_SYSCALL, 1548 .features[FEAT_8000_0001_ECX] = 1549 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 1550 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 1551 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 1552 .features[FEAT_7_0_EBX] = 1553 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 1554 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 1555 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 1556 CPUID_7_0_EBX_SHA_NI, 1557 /* Missing: XSAVES (not supported by some Linux versions, 1558 * including v4.1 to v4.12). 1559 * KVM doesn't yet expose any XSAVES state save component. 
1560 */ 1561 .features[FEAT_XSAVE] = 1562 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 1563 CPUID_XSAVE_XGETBV1, 1564 .features[FEAT_6_EAX] = 1565 CPUID_6_EAX_ARAT, 1566 .xlevel = 0x8000000A, 1567 .model_id = "AMD EPYC Processor", 1568 }, 1569 }; 1570 1571 typedef struct PropValue { 1572 const char *prop, *value; 1573 } PropValue; 1574 1575 /* KVM-specific features that are automatically added/removed 1576 * from all CPU models when KVM is enabled. 1577 */ 1578 static PropValue kvm_default_props[] = { 1579 { "kvmclock", "on" }, 1580 { "kvm-nopiodelay", "on" }, 1581 { "kvm-asyncpf", "on" }, 1582 { "kvm-steal-time", "on" }, 1583 { "kvm-pv-eoi", "on" }, 1584 { "kvmclock-stable-bit", "on" }, 1585 { "x2apic", "on" }, 1586 { "acpi", "off" }, 1587 { "monitor", "off" }, 1588 { "svm", "off" }, 1589 { NULL, NULL }, 1590 }; 1591 1592 /* TCG-specific defaults that override all CPU models when using TCG 1593 */ 1594 static PropValue tcg_default_props[] = { 1595 { "vme", "off" }, 1596 { NULL, NULL }, 1597 }; 1598 1599 1600 void x86_cpu_change_kvm_default(const char *prop, const char *value) 1601 { 1602 PropValue *pv; 1603 for (pv = kvm_default_props; pv->prop; pv++) { 1604 if (!strcmp(pv->prop, prop)) { 1605 pv->value = value; 1606 break; 1607 } 1608 } 1609 1610 /* It is valid to call this function only for properties that 1611 * are already present in the kvm_default_props table. 1612 */ 1613 assert(pv->prop); 1614 } 1615 1616 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 1617 bool migratable_only); 1618 1619 static bool lmce_supported(void) 1620 { 1621 uint64_t mce_cap = 0; 1622 1623 #ifdef CONFIG_KVM 1624 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 1625 return false; 1626 } 1627 #endif 1628 1629 return !!(mce_cap & MCG_LMCE_P); 1630 } 1631 1632 #define CPUID_MODEL_ID_SZ 48 1633 1634 /** 1635 * cpu_x86_fill_model_id: 1636 * Get CPUID model ID string from host CPU. 
1637 * 1638 * @str should have at least CPUID_MODEL_ID_SZ bytes 1639 * 1640 * The function does NOT add a null terminator to the string 1641 * automatically. 1642 */ 1643 static int cpu_x86_fill_model_id(char *str) 1644 { 1645 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 1646 int i; 1647 1648 for (i = 0; i < 3; i++) { 1649 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 1650 memcpy(str + i * 16 + 0, &eax, 4); 1651 memcpy(str + i * 16 + 4, &ebx, 4); 1652 memcpy(str + i * 16 + 8, &ecx, 4); 1653 memcpy(str + i * 16 + 12, &edx, 4); 1654 } 1655 return 0; 1656 } 1657 1658 static Property max_x86_cpu_properties[] = { 1659 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 1660 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 1661 DEFINE_PROP_END_OF_LIST() 1662 }; 1663 1664 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 1665 { 1666 DeviceClass *dc = DEVICE_CLASS(oc); 1667 X86CPUClass *xcc = X86_CPU_CLASS(oc); 1668 1669 xcc->ordering = 9; 1670 1671 xcc->model_description = 1672 "Enables all features supported by the accelerator in the current host"; 1673 1674 dc->props = max_x86_cpu_properties; 1675 } 1676 1677 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp); 1678 1679 static void max_x86_cpu_initfn(Object *obj) 1680 { 1681 X86CPU *cpu = X86_CPU(obj); 1682 CPUX86State *env = &cpu->env; 1683 KVMState *s = kvm_state; 1684 1685 /* We can't fill the features array here because we don't know yet if 1686 * "migratable" is true or false. 
1687 */ 1688 cpu->max_features = true; 1689 1690 if (kvm_enabled()) { 1691 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 1692 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 1693 int family, model, stepping; 1694 1695 host_vendor_fms(vendor, &family, &model, &stepping); 1696 1697 cpu_x86_fill_model_id(model_id); 1698 1699 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 1700 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 1701 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 1702 object_property_set_int(OBJECT(cpu), stepping, "stepping", 1703 &error_abort); 1704 object_property_set_str(OBJECT(cpu), model_id, "model-id", 1705 &error_abort); 1706 1707 env->cpuid_min_level = 1708 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 1709 env->cpuid_min_xlevel = 1710 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 1711 env->cpuid_min_xlevel2 = 1712 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 1713 1714 if (lmce_supported()) { 1715 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 1716 } 1717 } else { 1718 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 1719 "vendor", &error_abort); 1720 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 1721 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 1722 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 1723 object_property_set_str(OBJECT(cpu), 1724 "QEMU TCG CPU version " QEMU_HW_VERSION, 1725 "model-id", &error_abort); 1726 } 1727 1728 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 1729 } 1730 1731 static const TypeInfo max_x86_cpu_type_info = { 1732 .name = X86_CPU_TYPE_NAME("max"), 1733 .parent = TYPE_X86_CPU, 1734 .instance_init = max_x86_cpu_initfn, 1735 .class_init = max_x86_cpu_class_init, 1736 }; 1737 1738 #ifdef CONFIG_KVM 1739 1740 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 1741 { 1742 X86CPUClass *xcc = X86_CPU_CLASS(oc); 1743 1744 
xcc->kvm_required = true; 1745 xcc->ordering = 8; 1746 1747 xcc->model_description = 1748 "KVM processor with all supported host features " 1749 "(only available in KVM mode)"; 1750 } 1751 1752 static const TypeInfo host_x86_cpu_type_info = { 1753 .name = X86_CPU_TYPE_NAME("host"), 1754 .parent = X86_CPU_TYPE_NAME("max"), 1755 .class_init = host_x86_cpu_class_init, 1756 }; 1757 1758 #endif 1759 1760 static void report_unavailable_features(FeatureWord w, uint32_t mask) 1761 { 1762 FeatureWordInfo *f = &feature_word_info[w]; 1763 int i; 1764 1765 for (i = 0; i < 32; ++i) { 1766 if ((1UL << i) & mask) { 1767 const char *reg = get_register_name_32(f->cpuid_reg); 1768 assert(reg); 1769 fprintf(stderr, "warning: %s doesn't support requested feature: " 1770 "CPUID.%02XH:%s%s%s [bit %d]\n", 1771 kvm_enabled() ? "host" : "TCG", 1772 f->cpuid_eax, reg, 1773 f->feat_names[i] ? "." : "", 1774 f->feat_names[i] ? f->feat_names[i] : "", i); 1775 } 1776 } 1777 } 1778 1779 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 1780 const char *name, void *opaque, 1781 Error **errp) 1782 { 1783 X86CPU *cpu = X86_CPU(obj); 1784 CPUX86State *env = &cpu->env; 1785 int64_t value; 1786 1787 value = (env->cpuid_version >> 8) & 0xf; 1788 if (value == 0xf) { 1789 value += (env->cpuid_version >> 20) & 0xff; 1790 } 1791 visit_type_int(v, name, &value, errp); 1792 } 1793 1794 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 1795 const char *name, void *opaque, 1796 Error **errp) 1797 { 1798 X86CPU *cpu = X86_CPU(obj); 1799 CPUX86State *env = &cpu->env; 1800 const int64_t min = 0; 1801 const int64_t max = 0xff + 0xf; 1802 Error *local_err = NULL; 1803 int64_t value; 1804 1805 visit_type_int(v, name, &value, &local_err); 1806 if (local_err) { 1807 error_propagate(errp, local_err); 1808 return; 1809 } 1810 if (value < min || value > max) { 1811 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 1812 name ? 
name : "null", value, min, max); 1813 return; 1814 } 1815 1816 env->cpuid_version &= ~0xff00f00; 1817 if (value > 0x0f) { 1818 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 1819 } else { 1820 env->cpuid_version |= value << 8; 1821 } 1822 } 1823 1824 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 1825 const char *name, void *opaque, 1826 Error **errp) 1827 { 1828 X86CPU *cpu = X86_CPU(obj); 1829 CPUX86State *env = &cpu->env; 1830 int64_t value; 1831 1832 value = (env->cpuid_version >> 4) & 0xf; 1833 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 1834 visit_type_int(v, name, &value, errp); 1835 } 1836 1837 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 1838 const char *name, void *opaque, 1839 Error **errp) 1840 { 1841 X86CPU *cpu = X86_CPU(obj); 1842 CPUX86State *env = &cpu->env; 1843 const int64_t min = 0; 1844 const int64_t max = 0xff; 1845 Error *local_err = NULL; 1846 int64_t value; 1847 1848 visit_type_int(v, name, &value, &local_err); 1849 if (local_err) { 1850 error_propagate(errp, local_err); 1851 return; 1852 } 1853 if (value < min || value > max) { 1854 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 1855 name ? 
name : "null", value, min, max); 1856 return; 1857 } 1858 1859 env->cpuid_version &= ~0xf00f0; 1860 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 1861 } 1862 1863 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 1864 const char *name, void *opaque, 1865 Error **errp) 1866 { 1867 X86CPU *cpu = X86_CPU(obj); 1868 CPUX86State *env = &cpu->env; 1869 int64_t value; 1870 1871 value = env->cpuid_version & 0xf; 1872 visit_type_int(v, name, &value, errp); 1873 } 1874 1875 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 1876 const char *name, void *opaque, 1877 Error **errp) 1878 { 1879 X86CPU *cpu = X86_CPU(obj); 1880 CPUX86State *env = &cpu->env; 1881 const int64_t min = 0; 1882 const int64_t max = 0xf; 1883 Error *local_err = NULL; 1884 int64_t value; 1885 1886 visit_type_int(v, name, &value, &local_err); 1887 if (local_err) { 1888 error_propagate(errp, local_err); 1889 return; 1890 } 1891 if (value < min || value > max) { 1892 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 1893 name ? 
name : "null", value, min, max); 1894 return; 1895 } 1896 1897 env->cpuid_version &= ~0xf; 1898 env->cpuid_version |= value & 0xf; 1899 } 1900 1901 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 1902 { 1903 X86CPU *cpu = X86_CPU(obj); 1904 CPUX86State *env = &cpu->env; 1905 char *value; 1906 1907 value = g_malloc(CPUID_VENDOR_SZ + 1); 1908 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 1909 env->cpuid_vendor3); 1910 return value; 1911 } 1912 1913 static void x86_cpuid_set_vendor(Object *obj, const char *value, 1914 Error **errp) 1915 { 1916 X86CPU *cpu = X86_CPU(obj); 1917 CPUX86State *env = &cpu->env; 1918 int i; 1919 1920 if (strlen(value) != CPUID_VENDOR_SZ) { 1921 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 1922 return; 1923 } 1924 1925 env->cpuid_vendor1 = 0; 1926 env->cpuid_vendor2 = 0; 1927 env->cpuid_vendor3 = 0; 1928 for (i = 0; i < 4; i++) { 1929 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 1930 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 1931 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 1932 } 1933 } 1934 1935 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 1936 { 1937 X86CPU *cpu = X86_CPU(obj); 1938 CPUX86State *env = &cpu->env; 1939 char *value; 1940 int i; 1941 1942 value = g_malloc(48 + 1); 1943 for (i = 0; i < 48; i++) { 1944 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 1945 } 1946 value[48] = '\0'; 1947 return value; 1948 } 1949 1950 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 1951 Error **errp) 1952 { 1953 X86CPU *cpu = X86_CPU(obj); 1954 CPUX86State *env = &cpu->env; 1955 int c, len, i; 1956 1957 if (model_id == NULL) { 1958 model_id = ""; 1959 } 1960 len = strlen(model_id); 1961 memset(env->cpuid_model, 0, 48); 1962 for (i = 0; i < 48; i++) { 1963 if (i >= len) { 1964 c = '\0'; 1965 } else { 1966 c = (uint8_t)model_id[i]; 1967 } 1968 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 1969 } 1970 } 1971 
/* QOM getter for "tsc-frequency": reports env.tsc_khz scaled to Hz. */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "tsc-frequency": accepts a frequency in Hz and stores
 * it (truncated to kHz) in both tsc_khz and user_tsc_khz. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* opaque points at an array of FEATURE_WORDS uint32_t feature words */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* QOM getter for "hv-spinlocks": number of spinlock retry attempts. */
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "hv-spinlocks".  Minimum is 0xFFF per the Hyper-V
 * spinlock retry semantics enforced here; maximum is UINT_MAX. */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

/* qdev property backing "hv-spinlocks", wired to the accessors above. */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* COMP_HI bits are the upper 32 bits of the 64-bit component mask */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibily hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper so g_list_find_custom() can match feature names. */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first call registers globals; later calls are no-ops. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE: strtok() mutates @features in place and is not reentrant. */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* Bare "feat" is shorthand for "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
2213 */ 2214 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 2215 strList **missing_feats) 2216 { 2217 X86CPU *xc; 2218 FeatureWord w; 2219 Error *err = NULL; 2220 strList **next = missing_feats; 2221 2222 if (xcc->kvm_required && !kvm_enabled()) { 2223 strList *new = g_new0(strList, 1); 2224 new->value = g_strdup("kvm");; 2225 *missing_feats = new; 2226 return; 2227 } 2228 2229 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 2230 2231 x86_cpu_expand_features(xc, &err); 2232 if (err) { 2233 /* Errors at x86_cpu_expand_features should never happen, 2234 * but in case it does, just report the model as not 2235 * runnable at all using the "type" property. 2236 */ 2237 strList *new = g_new0(strList, 1); 2238 new->value = g_strdup("type"); 2239 *next = new; 2240 next = &new->next; 2241 } 2242 2243 x86_cpu_filter_features(xc); 2244 2245 for (w = 0; w < FEATURE_WORDS; w++) { 2246 uint32_t filtered = xc->filtered_features[w]; 2247 int i; 2248 for (i = 0; i < 32; i++) { 2249 if (filtered & (1UL << i)) { 2250 strList *new = g_new0(strList, 1); 2251 new->value = g_strdup(x86_cpu_feature_name(w, i)); 2252 *next = new; 2253 next = &new->next; 2254 } 2255 } 2256 } 2257 2258 object_unref(OBJECT(xc)); 2259 } 2260 2261 /* Print all cpuid feature names in featureset 2262 */ 2263 static void listflags(FILE *f, fprintf_function print, const char **featureset) 2264 { 2265 int bit; 2266 bool first = true; 2267 2268 for (bit = 0; bit < 32; bit++) { 2269 if (featureset[bit]) { 2270 print(f, "%s%s", first ? "" : " ", featureset[bit]); 2271 first = false; 2272 } 2273 } 2274 } 2275 2276 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ 2277 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 2278 { 2279 ObjectClass *class_a = (ObjectClass *)a; 2280 ObjectClass *class_b = (ObjectClass *)b; 2281 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 2282 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 2283 const char *name_a, *name_b; 2284 2285 if (cc_a->ordering != cc_b->ordering) { 2286 return cc_a->ordering - cc_b->ordering; 2287 } else { 2288 name_a = object_class_get_name(class_a); 2289 name_b = object_class_get_name(class_b); 2290 return strcmp(name_a, name_b); 2291 } 2292 } 2293 2294 static GSList *get_sorted_cpu_model_list(void) 2295 { 2296 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 2297 list = g_slist_sort(list, x86_cpu_list_compare); 2298 return list; 2299 } 2300 2301 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 2302 { 2303 ObjectClass *oc = data; 2304 X86CPUClass *cc = X86_CPU_CLASS(oc); 2305 CPUListState *s = user_data; 2306 char *name = x86_cpu_class_get_model_name(cc); 2307 const char *desc = cc->model_description; 2308 if (!desc && cc->cpu_def) { 2309 desc = cc->cpu_def->model_id; 2310 } 2311 2312 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n", 2313 name, desc); 2314 g_free(name); 2315 } 2316 2317 /* list available CPU models and flags */ 2318 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf) 2319 { 2320 int i; 2321 CPUListState s = { 2322 .file = f, 2323 .cpu_fprintf = cpu_fprintf, 2324 }; 2325 GSList *list; 2326 2327 (*cpu_fprintf)(f, "Available CPUs:\n"); 2328 list = get_sorted_cpu_model_list(); 2329 g_slist_foreach(list, x86_cpu_list_entry, &s); 2330 g_slist_free(list); 2331 2332 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n"); 2333 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 2334 FeatureWordInfo *fw = &feature_word_info[i]; 2335 2336 (*cpu_fprintf)(f, " "); 2337 listflags(f, cpu_fprintf, fw->feat_names); 2338 (*cpu_fprintf)(f, "\n"); 2339 } 2340 } 2341 2342 static void x86_cpu_definition_entry(gpointer data, gpointer 
user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    /* user_data is the head of the CpuDefinitionInfoList being built */
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;

    /* Prepend to the list (order already fixed by the sorted input) */
    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

/* QMP query-cpu-definitions: one entry per registered X86 CPU class. */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}

/* Return the feature bits the current accelerator can actually provide
 * for feature word @w; ~0 when neither KVM nor TCG is in use. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                         wi->cpuid_ecx,
                                         wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}

/* Warn about every feature bit that was filtered out of this CPU. */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

/* Apply a NULL-terminated PropValue table to @cpu; entries with a NULL
 * value are skipped, parse failures abort. */
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}

/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        /* Leaf 0 returns the vendor string in EBX/EDX/ECX order */
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}

/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    /* Built once and cached for the lifetime of the process */
    static QDict *d;

    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put(d, props[i], qnull());
    }

    /* Every named feature flag is also a static property */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put(d, fi->feat_names[bit], qnull());
        }
    }

    return d;
}

/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
2561 */ 2562 if (!strcmp(prop->name, "hotplugged")) { 2563 continue; 2564 } 2565 x86_cpu_expand_prop(cpu, props, prop->name); 2566 } 2567 } 2568 2569 static void object_apply_props(Object *obj, QDict *props, Error **errp) 2570 { 2571 const QDictEntry *prop; 2572 Error *err = NULL; 2573 2574 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 2575 object_property_set_qobject(obj, qdict_entry_value(prop), 2576 qdict_entry_key(prop), &err); 2577 if (err) { 2578 break; 2579 } 2580 } 2581 2582 error_propagate(errp, err); 2583 } 2584 2585 /* Create X86CPU object according to model+props specification */ 2586 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 2587 { 2588 X86CPU *xc = NULL; 2589 X86CPUClass *xcc; 2590 Error *err = NULL; 2591 2592 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 2593 if (xcc == NULL) { 2594 error_setg(&err, "CPU model '%s' not found", model); 2595 goto out; 2596 } 2597 2598 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 2599 if (props) { 2600 object_apply_props(OBJECT(xc), props, &err); 2601 if (err) { 2602 goto out; 2603 } 2604 } 2605 2606 x86_cpu_expand_features(xc, &err); 2607 if (err) { 2608 goto out; 2609 } 2610 2611 out: 2612 if (err) { 2613 error_propagate(errp, err); 2614 object_unref(OBJECT(xc)); 2615 xc = NULL; 2616 } 2617 return xc; 2618 } 2619 2620 CpuModelExpansionInfo * 2621 arch_query_cpu_model_expansion(CpuModelExpansionType type, 2622 CpuModelInfo *model, 2623 Error **errp) 2624 { 2625 X86CPU *xc = NULL; 2626 Error *err = NULL; 2627 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 2628 QDict *props = NULL; 2629 const char *base_name; 2630 2631 xc = x86_cpu_from_model(model->name, 2632 model->has_props ? 
2633 qobject_to_qdict(model->props) : 2634 NULL, &err); 2635 if (err) { 2636 goto out; 2637 } 2638 2639 props = qdict_new(); 2640 2641 switch (type) { 2642 case CPU_MODEL_EXPANSION_TYPE_STATIC: 2643 /* Static expansion will be based on "base" only */ 2644 base_name = "base"; 2645 x86_cpu_to_dict(xc, props); 2646 break; 2647 case CPU_MODEL_EXPANSION_TYPE_FULL: 2648 /* As we don't return every single property, full expansion needs 2649 * to keep the original model name+props, and add extra 2650 * properties on top of that. 2651 */ 2652 base_name = model->name; 2653 x86_cpu_to_dict_full(xc, props); 2654 break; 2655 default: 2656 error_setg(&err, "Unsupportted expansion type"); 2657 goto out; 2658 } 2659 2660 if (!props) { 2661 props = qdict_new(); 2662 } 2663 x86_cpu_to_dict(xc, props); 2664 2665 ret->model = g_new0(CpuModelInfo, 1); 2666 ret->model->name = g_strdup(base_name); 2667 ret->model->props = QOBJECT(props); 2668 ret->model->has_props = true; 2669 2670 out: 2671 object_unref(OBJECT(xc)); 2672 if (err) { 2673 error_propagate(errp, err); 2674 qapi_free_CpuModelExpansionInfo(ret); 2675 ret = NULL; 2676 } 2677 return ret; 2678 } 2679 2680 static gchar *x86_gdb_arch_name(CPUState *cs) 2681 { 2682 #ifdef TARGET_X86_64 2683 return g_strdup("i386:x86-64"); 2684 #else 2685 return g_strdup("i386"); 2686 #endif 2687 } 2688 2689 X86CPU *cpu_x86_init(const char *cpu_model) 2690 { 2691 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model)); 2692 } 2693 2694 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 2695 { 2696 X86CPUDefinition *cpudef = data; 2697 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2698 2699 xcc->cpu_def = cpudef; 2700 xcc->migration_safe = true; 2701 } 2702 2703 static void x86_register_cpudef_type(X86CPUDefinition *def) 2704 { 2705 char *typename = x86_cpu_type_name(def->name); 2706 TypeInfo ti = { 2707 .name = typename, 2708 .parent = TYPE_X86_CPU, 2709 .class_init = x86_cpu_cpudef_class_init, 2710 .class_data = def, 2711 }; 2712 2713 /* 
AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));

    type_register(&ti);
    g_free(typename);
}

#if !defined(CONFIG_USER_ONLY)

/* Strip the APIC feature bit from leaf 1 EDX (used by board code). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/* Emulate the CPUID instruction: fill *eax..*edx for leaf @index,
 * sub-leaf @count, from the guest-visible feature state in @env. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX/EDX/ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* Mask off host core-count bits 31..26; fixed up below */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                /* L3 is shared by all logical processors in the package */
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors guest CR4.PKE, like OSXSAVE in leaf 1 */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            /* Sub-leaf 0: SMT (thread) level */
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            /* Sub-leaf 1: core level */
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size/offset of each save area */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1; /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Brand string: 16 bytes per leaf, 3 leaves, 48 bytes total */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset() */ 3152 static void x86_cpu_reset(CPUState *s) 3153 { 3154 X86CPU *cpu = X86_CPU(s); 3155 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 3156 CPUX86State *env = &cpu->env; 3157 target_ulong cr4; 3158 uint64_t xcr0; 3159 int i; 3160 3161 xcc->parent_reset(s); 3162 3163 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 3164 3165 env->old_exception = -1; 3166 3167 /* init to reset state */ 3168 3169 env->hflags2 |= HF2_GIF_MASK; 3170 3171 cpu_x86_update_cr0(env, 0x60000010); 3172 env->a20_mask = ~0x0; 3173 env->smbase = 0x30000; 3174 3175 env->idt.limit = 0xffff; 3176 env->gdt.limit = 0xffff; 3177 env->ldt.limit = 0xffff; 3178 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 3179 env->tr.limit = 0xffff; 3180 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 3181 3182 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 3183 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 3184 DESC_R_MASK | DESC_A_MASK); 3185 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 3186 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3187 DESC_A_MASK); 3188 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 3189 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3190 DESC_A_MASK); 3191 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 3192 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3193 DESC_A_MASK); 3194 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 3195 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3196 DESC_A_MASK); 3197 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 3198 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 3199 DESC_A_MASK); 3200 3201 env->eip = 0xfff0; 3202 env->regs[R_EDX] = env->cpuid_version; 3203 3204 env->eflags = 0x2; 3205 3206 /* FPU init */ 3207 for (i = 0; i < 8; i++) { 3208 env->fptags[i] = 1; 3209 } 3210 cpu_set_fpuc(env, 0x37f); 3211 3212 env->mxcsr = 0x1f80; 3213 /* All units are in INIT state. 
*/ 3214 env->xstate_bv = 0; 3215 3216 env->pat = 0x0007040600070406ULL; 3217 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 3218 3219 memset(env->dr, 0, sizeof(env->dr)); 3220 env->dr[6] = DR6_FIXED_1; 3221 env->dr[7] = DR7_FIXED_1; 3222 cpu_breakpoint_remove_all(s, BP_CPU); 3223 cpu_watchpoint_remove_all(s, BP_CPU); 3224 3225 cr4 = 0; 3226 xcr0 = XSTATE_FP_MASK; 3227 3228 #ifdef CONFIG_USER_ONLY 3229 /* Enable all the features for user-mode. */ 3230 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 3231 xcr0 |= XSTATE_SSE_MASK; 3232 } 3233 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 3234 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 3235 if (env->features[esa->feature] & esa->bits) { 3236 xcr0 |= 1ull << i; 3237 } 3238 } 3239 3240 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 3241 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 3242 } 3243 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 3244 cr4 |= CR4_FSGSBASE_MASK; 3245 } 3246 #endif 3247 3248 env->xcr0 = xcr0; 3249 cpu_x86_update_cr4(env, cr4); 3250 3251 /* 3252 * SDM 11.11.5 requires: 3253 * - IA32_MTRR_DEF_TYPE MSR.E = 0 3254 * - IA32_MTRR_PHYSMASKn.V = 0 3255 * All other bits are undefined. For simplification, zero it all. 3256 */ 3257 env->mtrr_deftype = 0; 3258 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 3259 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 3260 3261 #if !defined(CONFIG_USER_ONLY) 3262 /* We hard-wire the BSP to the first CPU. 
*/ 3263 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 3264 3265 s->halted = !cpu_is_bsp(cpu); 3266 3267 if (kvm_enabled()) { 3268 kvm_arch_reset_vcpu(cpu); 3269 } 3270 #endif 3271 } 3272 3273 #ifndef CONFIG_USER_ONLY 3274 bool cpu_is_bsp(X86CPU *cpu) 3275 { 3276 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 3277 } 3278 3279 /* TODO: remove me, when reset over QOM tree is implemented */ 3280 static void x86_cpu_machine_reset_cb(void *opaque) 3281 { 3282 X86CPU *cpu = opaque; 3283 cpu_reset(CPU(cpu)); 3284 } 3285 #endif 3286 3287 static void mce_init(X86CPU *cpu) 3288 { 3289 CPUX86State *cenv = &cpu->env; 3290 unsigned int bank; 3291 3292 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 3293 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 3294 (CPUID_MCE | CPUID_MCA)) { 3295 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 3296 (cpu->enable_lmce ? MCG_LMCE_P : 0); 3297 cenv->mcg_ctl = ~(uint64_t)0; 3298 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 3299 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 3300 } 3301 } 3302 } 3303 3304 #ifndef CONFIG_USER_ONLY 3305 APICCommonClass *apic_get_class(void) 3306 { 3307 const char *apic_type = "apic"; 3308 3309 if (kvm_apic_in_kernel()) { 3310 apic_type = "kvm-apic"; 3311 } else if (xen_enabled()) { 3312 apic_type = "xen-apic"; 3313 } 3314 3315 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 3316 } 3317 3318 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 3319 { 3320 APICCommonState *apic; 3321 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 3322 3323 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class))); 3324 3325 object_property_add_child(OBJECT(cpu), "lapic", 3326 OBJECT(cpu->apic_state), &error_abort); 3327 object_unref(OBJECT(cpu->apic_state)); 3328 3329 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 3330 /* TODO: convert to link<> */ 3331 apic = APIC_COMMON(cpu->apic_state); 3332 apic->cpu = cpu; 3333 apic->apicbase = 
APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 3334 } 3335 3336 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 3337 { 3338 APICCommonState *apic; 3339 static bool apic_mmio_map_once; 3340 3341 if (cpu->apic_state == NULL) { 3342 return; 3343 } 3344 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized", 3345 errp); 3346 3347 /* Map APIC MMIO area */ 3348 apic = APIC_COMMON(cpu->apic_state); 3349 if (!apic_mmio_map_once) { 3350 memory_region_add_subregion_overlap(get_system_memory(), 3351 apic->apicbase & 3352 MSR_IA32_APICBASE_BASE, 3353 &apic->io_memory, 3354 0x1000); 3355 apic_mmio_map_once = true; 3356 } 3357 } 3358 3359 static void x86_cpu_machine_done(Notifier *n, void *unused) 3360 { 3361 X86CPU *cpu = container_of(n, X86CPU, machine_done); 3362 MemoryRegion *smram = 3363 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 3364 3365 if (smram) { 3366 cpu->smram = g_new(MemoryRegion, 1); 3367 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 3368 smram, 0, 1ull << 32); 3369 memory_region_set_enabled(cpu->smram, true); 3370 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 3371 } 3372 } 3373 #else 3374 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 3375 { 3376 } 3377 #endif 3378 3379 /* Note: Only safe for use on x86(-64) hosts */ 3380 static uint32_t x86_host_phys_bits(void) 3381 { 3382 uint32_t eax; 3383 uint32_t host_phys_bits; 3384 3385 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 3386 if (eax >= 0x80000008) { 3387 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 3388 /* Note: According to AMD doc 25481 rev 2.34 they have a field 3389 * at 23:16 that can specify a maximum physical address bits for 3390 * the guest that can override this value; but I've not seen 3391 * anything with that set. 
3392 */ 3393 host_phys_bits = eax & 0xff; 3394 } else { 3395 /* It's an odd 64 bit machine that doesn't have the leaf for 3396 * physical address bits; fall back to 36 that's most older 3397 * Intel. 3398 */ 3399 host_phys_bits = 36; 3400 } 3401 3402 return host_phys_bits; 3403 } 3404 3405 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 3406 { 3407 if (*min < value) { 3408 *min = value; 3409 } 3410 } 3411 3412 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 3413 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 3414 { 3415 CPUX86State *env = &cpu->env; 3416 FeatureWordInfo *fi = &feature_word_info[w]; 3417 uint32_t eax = fi->cpuid_eax; 3418 uint32_t region = eax & 0xF0000000; 3419 3420 if (!env->features[w]) { 3421 return; 3422 } 3423 3424 switch (region) { 3425 case 0x00000000: 3426 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 3427 break; 3428 case 0x80000000: 3429 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 3430 break; 3431 case 0xC0000000: 3432 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 3433 break; 3434 } 3435 } 3436 3437 /* Calculate XSAVE components based on the configured CPU feature flags */ 3438 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 3439 { 3440 CPUX86State *env = &cpu->env; 3441 int i; 3442 uint64_t mask; 3443 3444 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 3445 return; 3446 } 3447 3448 mask = 0; 3449 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 3450 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 3451 if (env->features[esa->feature] & esa->bits) { 3452 mask |= (1ULL << i); 3453 } 3454 } 3455 3456 env->features[FEAT_XSAVE_COMP_LO] = mask; 3457 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 3458 } 3459 3460 /***** Steps involved on loading and filtering CPUID data 3461 * 3462 * When initializing and realizing a CPU object, the steps 3463 * involved in setting up CPUID data are: 3464 * 3465 * 1) Loading CPU 
model definition (X86CPUDefinition). This is 3466 * implemented by x86_cpu_load_def() and should be completely 3467 * transparent, as it is done automatically by instance_init. 3468 * No code should need to look at X86CPUDefinition structs 3469 * outside instance_init. 3470 * 3471 * 2) CPU expansion. This is done by realize before CPUID 3472 * filtering, and will make sure host/accelerator data is 3473 * loaded for CPU models that depend on host capabilities 3474 * (e.g. "host"). Done by x86_cpu_expand_features(). 3475 * 3476 * 3) CPUID filtering. This initializes extra data related to 3477 * CPUID, and checks if the host supports all capabilities 3478 * required by the CPU. Runnability of a CPU model is 3479 * determined at this step. Done by x86_cpu_filter_features(). 3480 * 3481 * Some operations don't require all steps to be performed. 3482 * More precisely: 3483 * 3484 * - CPU instance creation (instance_init) will run only CPU 3485 * model loading. CPU expansion can't run at instance_init-time 3486 * because host/accelerator data may be not available yet. 3487 * - CPU realization will perform both CPU model expansion and CPUID 3488 * filtering, and return an error in case one of them fails. 3489 * - query-cpu-definitions needs to run all 3 steps. It needs 3490 * to run CPUID filtering, as the 'unavailable-features' 3491 * field is set based on the filtering results. 3492 * - The query-cpu-model-expansion QMP command only needs to run 3493 * CPU model loading and CPU expansion. It should not filter 3494 * any CPUID data based on host capabilities. 3495 */ 3496 3497 /* Expand CPU configuration data, based on configured features 3498 * and host/accelerator capabilities when appropriate. 
3499 */ 3500 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 3501 { 3502 CPUX86State *env = &cpu->env; 3503 FeatureWord w; 3504 GList *l; 3505 Error *local_err = NULL; 3506 3507 /*TODO: Now cpu->max_features doesn't overwrite features 3508 * set using QOM properties, and we can convert 3509 * plus_features & minus_features to global properties 3510 * inside x86_cpu_parse_featurestr() too. 3511 */ 3512 if (cpu->max_features) { 3513 for (w = 0; w < FEATURE_WORDS; w++) { 3514 /* Override only features that weren't set explicitly 3515 * by the user. 3516 */ 3517 env->features[w] |= 3518 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 3519 ~env->user_features[w]; 3520 } 3521 } 3522 3523 for (l = plus_features; l; l = l->next) { 3524 const char *prop = l->data; 3525 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 3526 if (local_err) { 3527 goto out; 3528 } 3529 } 3530 3531 for (l = minus_features; l; l = l->next) { 3532 const char *prop = l->data; 3533 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 3534 if (local_err) { 3535 goto out; 3536 } 3537 } 3538 3539 if (!kvm_enabled() || !cpu->expose_kvm) { 3540 env->features[FEAT_KVM] = 0; 3541 } 3542 3543 x86_cpu_enable_xsave_components(cpu); 3544 3545 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 3546 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 3547 if (cpu->full_cpuid_auto_level) { 3548 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 3549 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 3550 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 3551 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 3552 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 3553 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 3554 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 3555 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 3556 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 3557 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 3558 /* SVM requires CPUID[0x8000000A] */ 3559 if 
(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 3560 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 3561 } 3562 } 3563 3564 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 3565 if (env->cpuid_level == UINT32_MAX) { 3566 env->cpuid_level = env->cpuid_min_level; 3567 } 3568 if (env->cpuid_xlevel == UINT32_MAX) { 3569 env->cpuid_xlevel = env->cpuid_min_xlevel; 3570 } 3571 if (env->cpuid_xlevel2 == UINT32_MAX) { 3572 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 3573 } 3574 3575 out: 3576 if (local_err != NULL) { 3577 error_propagate(errp, local_err); 3578 } 3579 } 3580 3581 /* 3582 * Finishes initialization of CPUID data, filters CPU feature 3583 * words based on host availability of each feature. 3584 * 3585 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 3586 */ 3587 static int x86_cpu_filter_features(X86CPU *cpu) 3588 { 3589 CPUX86State *env = &cpu->env; 3590 FeatureWord w; 3591 int rv = 0; 3592 3593 for (w = 0; w < FEATURE_WORDS; w++) { 3594 uint32_t host_feat = 3595 x86_cpu_get_supported_feature_word(w, false); 3596 uint32_t requested_features = env->features[w]; 3597 env->features[w] &= host_feat; 3598 cpu->filtered_features[w] = requested_features & ~env->features[w]; 3599 if (cpu->filtered_features[w]) { 3600 rv = 1; 3601 } 3602 } 3603 3604 return rv; 3605 } 3606 3607 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ 3608 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ 3609 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) 3610 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ 3611 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ 3612 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) 3613 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 3614 { 3615 CPUState *cs = CPU(dev); 3616 X86CPU *cpu = X86_CPU(dev); 3617 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 3618 CPUX86State *env = &cpu->env; 3619 Error *local_err = NULL; 3620 static bool 
ht_warned; 3621 3622 if (xcc->kvm_required && !kvm_enabled()) { 3623 char *name = x86_cpu_class_get_model_name(xcc); 3624 error_setg(&local_err, "CPU model '%s' requires KVM", name); 3625 g_free(name); 3626 goto out; 3627 } 3628 3629 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 3630 error_setg(errp, "apic-id property was not initialized properly"); 3631 return; 3632 } 3633 3634 x86_cpu_expand_features(cpu, &local_err); 3635 if (local_err) { 3636 goto out; 3637 } 3638 3639 if (x86_cpu_filter_features(cpu) && 3640 (cpu->check_cpuid || cpu->enforce_cpuid)) { 3641 x86_cpu_report_filtered_features(cpu); 3642 if (cpu->enforce_cpuid) { 3643 error_setg(&local_err, 3644 kvm_enabled() ? 3645 "Host doesn't support requested features" : 3646 "TCG doesn't support requested features"); 3647 goto out; 3648 } 3649 } 3650 3651 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 3652 * CPUID[1].EDX. 3653 */ 3654 if (IS_AMD_CPU(env)) { 3655 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 3656 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 3657 & CPUID_EXT2_AMD_ALIASES); 3658 } 3659 3660 /* For 64bit systems think about the number of physical bits to present. 3661 * ideally this should be the same as the host; anything other than matching 3662 * the host can cause incorrect guest behaviour. 3663 * QEMU used to pick the magic value of 40 bits that corresponds to 3664 * consumer AMD devices but nothing else. 3665 */ 3666 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 3667 if (kvm_enabled()) { 3668 uint32_t host_phys_bits = x86_host_phys_bits(); 3669 static bool warned; 3670 3671 if (cpu->host_phys_bits) { 3672 /* The user asked for us to use the host physical bits */ 3673 cpu->phys_bits = host_phys_bits; 3674 } 3675 3676 /* Print a warning if the user set it to a value that's not the 3677 * host value. 
3678 */ 3679 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 3680 !warned) { 3681 warn_report("Host physical bits (%u)" 3682 " does not match phys-bits property (%u)", 3683 host_phys_bits, cpu->phys_bits); 3684 warned = true; 3685 } 3686 3687 if (cpu->phys_bits && 3688 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 3689 cpu->phys_bits < 32)) { 3690 error_setg(errp, "phys-bits should be between 32 and %u " 3691 " (but is %u)", 3692 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 3693 return; 3694 } 3695 } else { 3696 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 3697 error_setg(errp, "TCG only supports phys-bits=%u", 3698 TCG_PHYS_ADDR_BITS); 3699 return; 3700 } 3701 } 3702 /* 0 means it was not explicitly set by the user (or by machine 3703 * compat_props or by the host code above). In this case, the default 3704 * is the value used by TCG (40). 3705 */ 3706 if (cpu->phys_bits == 0) { 3707 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 3708 } 3709 } else { 3710 /* For 32 bit systems don't use the user set value, but keep 3711 * phys_bits consistent with what we tell the guest. 
3712 */ 3713 if (cpu->phys_bits != 0) { 3714 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 3715 return; 3716 } 3717 3718 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 3719 cpu->phys_bits = 36; 3720 } else { 3721 cpu->phys_bits = 32; 3722 } 3723 } 3724 cpu_exec_realizefn(cs, &local_err); 3725 if (local_err != NULL) { 3726 error_propagate(errp, local_err); 3727 return; 3728 } 3729 3730 if (tcg_enabled()) { 3731 tcg_x86_init(); 3732 } 3733 3734 #ifndef CONFIG_USER_ONLY 3735 qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 3736 3737 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) { 3738 x86_cpu_apic_create(cpu, &local_err); 3739 if (local_err != NULL) { 3740 goto out; 3741 } 3742 } 3743 #endif 3744 3745 mce_init(cpu); 3746 3747 #ifndef CONFIG_USER_ONLY 3748 if (tcg_enabled()) { 3749 AddressSpace *as_normal = address_space_init_shareable(cs->memory, 3750 "cpu-memory"); 3751 AddressSpace *as_smm = g_new(AddressSpace, 1); 3752 3753 cpu->cpu_as_mem = g_new(MemoryRegion, 1); 3754 cpu->cpu_as_root = g_new(MemoryRegion, 1); 3755 3756 /* Outer container... */ 3757 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 3758 memory_region_set_enabled(cpu->cpu_as_root, true); 3759 3760 /* ... with two regions inside: normal system memory with low 3761 * priority, and... 3762 */ 3763 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 3764 get_system_memory(), 0, ~0ull); 3765 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 3766 memory_region_set_enabled(cpu->cpu_as_mem, true); 3767 address_space_init(as_smm, cpu->cpu_as_root, "CPU"); 3768 3769 cs->num_ases = 2; 3770 cpu_address_space_init(cs, as_normal, 0); 3771 cpu_address_space_init(cs, as_smm, 1); 3772 3773 /* ... SMRAM with higher priority, linked from /machine/smram. 
*/ 3774 cpu->machine_done.notify = x86_cpu_machine_done; 3775 qemu_add_machine_init_done_notifier(&cpu->machine_done); 3776 } 3777 #endif 3778 3779 qemu_init_vcpu(cs); 3780 3781 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this 3782 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX 3783 * based on inputs (sockets,cores,threads), it is still better to gives 3784 * users a warning. 3785 * 3786 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 3787 * cs->nr_threads hasn't be populated yet and the checking is incorrect. 3788 */ 3789 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) { 3790 error_report("AMD CPU doesn't support hyperthreading. Please configure" 3791 " -smp options properly."); 3792 ht_warned = true; 3793 } 3794 3795 x86_cpu_apic_realize(cpu, &local_err); 3796 if (local_err != NULL) { 3797 goto out; 3798 } 3799 cpu_reset(cs); 3800 3801 xcc->parent_realize(dev, &local_err); 3802 3803 out: 3804 if (local_err != NULL) { 3805 error_propagate(errp, local_err); 3806 return; 3807 } 3808 } 3809 3810 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp) 3811 { 3812 X86CPU *cpu = X86_CPU(dev); 3813 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 3814 Error *local_err = NULL; 3815 3816 #ifndef CONFIG_USER_ONLY 3817 cpu_remove_sync(CPU(dev)); 3818 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 3819 #endif 3820 3821 if (cpu->apic_state) { 3822 object_unparent(OBJECT(cpu->apic_state)); 3823 cpu->apic_state = NULL; 3824 } 3825 3826 xcc->parent_unrealize(dev, &local_err); 3827 if (local_err != NULL) { 3828 error_propagate(errp, local_err); 3829 return; 3830 } 3831 } 3832 3833 typedef struct BitProperty { 3834 FeatureWord w; 3835 uint32_t mask; 3836 } BitProperty; 3837 3838 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 3839 void *opaque, Error **errp) 3840 { 3841 X86CPU *cpu = X86_CPU(obj); 3842 BitProperty *fp = opaque; 3843 uint32_t f = cpu->env.features[fp->w]; 
3844 bool value = (f & fp->mask) == fp->mask; 3845 visit_type_bool(v, name, &value, errp); 3846 } 3847 3848 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 3849 void *opaque, Error **errp) 3850 { 3851 DeviceState *dev = DEVICE(obj); 3852 X86CPU *cpu = X86_CPU(obj); 3853 BitProperty *fp = opaque; 3854 Error *local_err = NULL; 3855 bool value; 3856 3857 if (dev->realized) { 3858 qdev_prop_set_after_realize(dev, name, errp); 3859 return; 3860 } 3861 3862 visit_type_bool(v, name, &value, &local_err); 3863 if (local_err) { 3864 error_propagate(errp, local_err); 3865 return; 3866 } 3867 3868 if (value) { 3869 cpu->env.features[fp->w] |= fp->mask; 3870 } else { 3871 cpu->env.features[fp->w] &= ~fp->mask; 3872 } 3873 cpu->env.user_features[fp->w] |= fp->mask; 3874 } 3875 3876 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 3877 void *opaque) 3878 { 3879 BitProperty *prop = opaque; 3880 g_free(prop); 3881 } 3882 3883 /* Register a boolean property to get/set a single bit in a uint32_t field. 3884 * 3885 * The same property name can be registered multiple times to make it affect 3886 * multiple bits in the same FeatureWord. In that case, the getter will return 3887 * true only if all bits are set. 
3888 */ 3889 static void x86_cpu_register_bit_prop(X86CPU *cpu, 3890 const char *prop_name, 3891 FeatureWord w, 3892 int bitnr) 3893 { 3894 BitProperty *fp; 3895 ObjectProperty *op; 3896 uint32_t mask = (1UL << bitnr); 3897 3898 op = object_property_find(OBJECT(cpu), prop_name, NULL); 3899 if (op) { 3900 fp = op->opaque; 3901 assert(fp->w == w); 3902 fp->mask |= mask; 3903 } else { 3904 fp = g_new0(BitProperty, 1); 3905 fp->w = w; 3906 fp->mask = mask; 3907 object_property_add(OBJECT(cpu), prop_name, "bool", 3908 x86_cpu_get_bit_prop, 3909 x86_cpu_set_bit_prop, 3910 x86_cpu_release_bit_prop, fp, &error_abort); 3911 } 3912 } 3913 3914 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 3915 FeatureWord w, 3916 int bitnr) 3917 { 3918 FeatureWordInfo *fi = &feature_word_info[w]; 3919 const char *name = fi->feat_names[bitnr]; 3920 3921 if (!name) { 3922 return; 3923 } 3924 3925 /* Property names should use "-" instead of "_". 3926 * Old names containing underscores are registered as aliases 3927 * using object_property_add_alias() 3928 */ 3929 assert(!strchr(name, '_')); 3930 /* aliases don't use "|" delimiters anymore, they are registered 3931 * manually using object_property_add_alias() */ 3932 assert(!strchr(name, '|')); 3933 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 3934 } 3935 3936 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 3937 { 3938 X86CPU *cpu = X86_CPU(cs); 3939 CPUX86State *env = &cpu->env; 3940 GuestPanicInformation *panic_info = NULL; 3941 3942 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) { 3943 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 3944 3945 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 3946 3947 assert(HV_X64_MSR_CRASH_PARAMS >= 5); 3948 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 3949 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 3950 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 3951 panic_info->u.hyper_v.arg4 = 
env->msr_hv_crash_params[3]; 3952 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 3953 } 3954 3955 return panic_info; 3956 } 3957 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 3958 const char *name, void *opaque, 3959 Error **errp) 3960 { 3961 CPUState *cs = CPU(obj); 3962 GuestPanicInformation *panic_info; 3963 3964 if (!cs->crash_occurred) { 3965 error_setg(errp, "No crash occured"); 3966 return; 3967 } 3968 3969 panic_info = x86_cpu_get_crash_info(cs); 3970 if (panic_info == NULL) { 3971 error_setg(errp, "No crash information"); 3972 return; 3973 } 3974 3975 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 3976 errp); 3977 qapi_free_GuestPanicInformation(panic_info); 3978 } 3979 3980 static void x86_cpu_initfn(Object *obj) 3981 { 3982 CPUState *cs = CPU(obj); 3983 X86CPU *cpu = X86_CPU(obj); 3984 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 3985 CPUX86State *env = &cpu->env; 3986 FeatureWord w; 3987 3988 cs->env_ptr = env; 3989 3990 object_property_add(obj, "family", "int", 3991 x86_cpuid_version_get_family, 3992 x86_cpuid_version_set_family, NULL, NULL, NULL); 3993 object_property_add(obj, "model", "int", 3994 x86_cpuid_version_get_model, 3995 x86_cpuid_version_set_model, NULL, NULL, NULL); 3996 object_property_add(obj, "stepping", "int", 3997 x86_cpuid_version_get_stepping, 3998 x86_cpuid_version_set_stepping, NULL, NULL, NULL); 3999 object_property_add_str(obj, "vendor", 4000 x86_cpuid_get_vendor, 4001 x86_cpuid_set_vendor, NULL); 4002 object_property_add_str(obj, "model-id", 4003 x86_cpuid_get_model_id, 4004 x86_cpuid_set_model_id, NULL); 4005 object_property_add(obj, "tsc-frequency", "int", 4006 x86_cpuid_get_tsc_freq, 4007 x86_cpuid_set_tsc_freq, NULL, NULL, NULL); 4008 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 4009 x86_cpu_get_feature_words, 4010 NULL, NULL, (void *)env->features, NULL); 4011 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 4012 
x86_cpu_get_feature_words, 4013 NULL, NULL, (void *)cpu->filtered_features, NULL); 4014 4015 object_property_add(obj, "crash-information", "GuestPanicInformation", 4016 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL); 4017 4018 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; 4019 4020 for (w = 0; w < FEATURE_WORDS; w++) { 4021 int bitnr; 4022 4023 for (bitnr = 0; bitnr < 32; bitnr++) { 4024 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 4025 } 4026 } 4027 4028 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort); 4029 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort); 4030 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort); 4031 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort); 4032 object_property_add_alias(obj, "xd", obj, "nx", &error_abort); 4033 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort); 4034 object_property_add_alias(obj, "i64", obj, "lm", &error_abort); 4035 4036 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort); 4037 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort); 4038 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort); 4039 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort); 4040 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort); 4041 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort); 4042 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort); 4043 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort); 4044 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort); 4045 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort); 4046 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort); 4047 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", 
&error_abort); 4048 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); 4049 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); 4050 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); 4051 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); 4052 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); 4053 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort); 4054 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort); 4055 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort); 4056 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort); 4057 4058 if (xcc->cpu_def) { 4059 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); 4060 } 4061 } 4062 4063 static int64_t x86_cpu_get_arch_id(CPUState *cs) 4064 { 4065 X86CPU *cpu = X86_CPU(cs); 4066 4067 return cpu->apic_id; 4068 } 4069 4070 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 4071 { 4072 X86CPU *cpu = X86_CPU(cs); 4073 4074 return cpu->env.cr[0] & CR0_PG_MASK; 4075 } 4076 4077 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 4078 { 4079 X86CPU *cpu = X86_CPU(cs); 4080 4081 cpu->env.eip = value; 4082 } 4083 4084 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 4085 { 4086 X86CPU *cpu = X86_CPU(cs); 4087 4088 cpu->env.eip = tb->pc - tb->cs_base; 4089 } 4090 4091 static bool x86_cpu_has_work(CPUState *cs) 4092 { 4093 X86CPU *cpu = X86_CPU(cs); 4094 CPUX86State *env = &cpu->env; 4095 4096 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD | 4097 CPU_INTERRUPT_POLL)) && 4098 (env->eflags & IF_MASK)) || 4099 (cs->interrupt_request & (CPU_INTERRUPT_NMI | 4100 CPU_INTERRUPT_INIT | 4101 CPU_INTERRUPT_SIPI | 4102 CPU_INTERRUPT_MCE)) || 4103 ((cs->interrupt_request & CPU_INTERRUPT_SMI) && 4104 !(env->hflags & HF_SMM_MASK)); 4105 } 4106 4107 static Property 
x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned (-1) until set. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* hv-spinlocks uses a custom PropertyInfo (value + "never retry"). */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments, all disabled unless requested: */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set by the user"; resolved at realize time. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_END_OF_LIST()
};

/*
 * Class init shared by every X86CPU subtype: hooks the x86-specific
 * implementations into the generic DeviceClass and CPUClass vtables,
 * saving the parent implementations so realize/unrealize/reset can chain.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id =
x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory inspection, ELF dump and migration. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    /* Register description exposed to gdb matches the selected XML file. */
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->user_creatable = true;
}

/* Abstract base type: concrete CPU models are registered as subtypes. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};


/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

/*
 * Register the abstract base type, every built-in CPU model definition,
 * and the special "max"/"base" (and, with KVM, "host") model types.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#ifdef CONFIG_KVM
    /* "host" passes through the host CPU's features; KVM only. */
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)