1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 23 #include "cpu.h" 24 #include "exec/exec-all.h" 25 #include "sysemu/kvm.h" 26 #include "sysemu/hvf.h" 27 #include "sysemu/cpus.h" 28 #include "kvm_i386.h" 29 #include "sev_i386.h" 30 31 #include "qemu/error-report.h" 32 #include "qemu/option.h" 33 #include "qemu/config-file.h" 34 #include "qapi/error.h" 35 #include "qapi/qapi-visit-misc.h" 36 #include "qapi/qapi-visit-run-state.h" 37 #include "qapi/qmp/qdict.h" 38 #include "qapi/qmp/qerror.h" 39 #include "qapi/visitor.h" 40 #include "qom/qom-qobject.h" 41 #include "sysemu/arch_init.h" 42 43 #if defined(CONFIG_KVM) 44 #include <linux/kvm_para.h> 45 #endif 46 47 #include "sysemu/sysemu.h" 48 #include "hw/qdev-properties.h" 49 #include "hw/i386/topology.h" 50 #ifndef CONFIG_USER_ONLY 51 #include "exec/address-spaces.h" 52 #include "hw/hw.h" 53 #include "hw/xen/xen.h" 54 #include "hw/i386/apic_internal.h" 55 #endif 56 57 #include "disas/capstone.h" 58 59 /* Helpers for building CPUID[2] descriptors: */ 60 61 struct CPUID2CacheDescriptorInfo { 62 enum CacheType type; 63 int level; 64 int size; 65 int line_size; 66 int associativity; 67 }; 68 69 #define KiB 1024 70 #define MiB (1024 * 1024) 71 72 /* 
73 * Known CPUID 2 cache descriptors. 74 * From Intel SDM Volume 2A, CPUID instruction 75 */ 76 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 77 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB, 78 .associativity = 4, .line_size = 32, }, 79 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB, 82 .associativity = 4, .line_size = 64, }, 83 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB, 84 .associativity = 2, .line_size = 32, }, 85 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 86 .associativity = 4, .line_size = 32, }, 87 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 64, }, 89 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB, 90 .associativity = 6, .line_size = 64, }, 91 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 92 .associativity = 2, .line_size = 64, }, 93 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 94 .associativity = 8, .line_size = 64, }, 95 /* lines per sector is not supported cpuid2_cache_descriptor(), 96 * so descriptors 0x22, 0x23 are not included 97 */ 98 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 99 .associativity = 16, .line_size = 64, }, 100 /* lines per sector is not supported cpuid2_cache_descriptor(), 101 * so descriptors 0x25, 0x20 are not included 102 */ 103 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB, 104 .associativity = 8, .line_size = 64, }, 105 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 108 .associativity = 4, .line_size = 32, }, 109 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 110 .associativity = 4, .line_size = 32, }, 111 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 112 .associativity = 4, .line_size = 
32, }, 113 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 118 .associativity = 4, .line_size = 64, }, 119 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 120 .associativity = 8, .line_size = 64, }, 121 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 122 .associativity = 12, .line_size = 64, }, 123 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 124 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 125 .associativity = 12, .line_size = 64, }, 126 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 127 .associativity = 16, .line_size = 64, }, 128 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 129 .associativity = 12, .line_size = 64, }, 130 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 131 .associativity = 16, .line_size = 64, }, 132 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 133 .associativity = 24, .line_size = 64, }, 134 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 135 .associativity = 8, .line_size = 64, }, 136 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB, 137 .associativity = 4, .line_size = 64, }, 138 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB, 141 .associativity = 4, .line_size = 64, }, 142 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 143 .associativity = 4, .line_size = 64, }, 144 /* lines per sector is not supported cpuid2_cache_descriptor(), 145 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 
146 */ 147 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 148 .associativity = 8, .line_size = 64, }, 149 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 150 .associativity = 2, .line_size = 64, }, 151 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 8, .line_size = 64, }, 153 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 154 .associativity = 8, .line_size = 32, }, 155 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 162 .associativity = 4, .line_size = 64, }, 163 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 164 .associativity = 8, .line_size = 64, }, 165 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 166 .associativity = 4, .line_size = 64, }, 167 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 172 .associativity = 8, .line_size = 64, }, 173 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 178 .associativity = 12, .line_size = 64, }, 179 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 182 .associativity = 
12, .line_size = 64, }, 183 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 184 .associativity = 16, .line_size = 64, }, 185 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 190 .associativity = 24, .line_size = 64, }, 191 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 }; 196 197 /* 198 * "CPUID leaf 2 does not report cache descriptor information, 199 * use CPUID leaf 4 to query cache parameters" 200 */ 201 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 202 203 /* 204 * Return a CPUID 2 cache descriptor for a given cache. 205 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 206 */ 207 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 208 { 209 int i; 210 211 assert(cache->size > 0); 212 assert(cache->level > 0); 213 assert(cache->line_size > 0); 214 assert(cache->associativity > 0); 215 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 216 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 217 if (d->level == cache->level && d->type == cache->type && 218 d->size == cache->size && d->line_size == cache->line_size && 219 d->associativity == cache->associativity) { 220 return i; 221 } 222 } 223 224 return CACHE_DESCRIPTOR_UNAVAILABLE; 225 } 226 227 /* CPUID Leaf 4 constants: */ 228 229 /* EAX: */ 230 #define CACHE_TYPE_D 1 231 #define CACHE_TYPE_I 2 232 #define CACHE_TYPE_UNIFIED 3 233 234 #define CACHE_LEVEL(l) (l << 5) 235 236 #define CACHE_SELF_INIT_LEVEL (1 << 8) 237 238 /* EDX: */ 239 #define CACHE_NO_INVD_SHARING (1 << 0) 240 #define CACHE_INCLUSIVE (1 << 1) 241 #define 
CACHE_COMPLEX_IDX (1 << 2) 242 243 /* Encode CacheType for CPUID[4].EAX */ 244 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \ 245 ((t) == ICACHE) ? CACHE_TYPE_I : \ 246 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 247 0 /* Invalid value */) 248 249 250 /* Encode cache info for CPUID[4] */ 251 static void encode_cache_cpuid4(CPUCacheInfo *cache, 252 int num_apic_ids, int num_cores, 253 uint32_t *eax, uint32_t *ebx, 254 uint32_t *ecx, uint32_t *edx) 255 { 256 assert(cache->size == cache->line_size * cache->associativity * 257 cache->partitions * cache->sets); 258 259 assert(num_apic_ids > 0); 260 *eax = CACHE_TYPE(cache->type) | 261 CACHE_LEVEL(cache->level) | 262 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 263 ((num_cores - 1) << 26) | 264 ((num_apic_ids - 1) << 14); 265 266 assert(cache->line_size > 0); 267 assert(cache->partitions > 0); 268 assert(cache->associativity > 0); 269 /* We don't implement fully-associative caches */ 270 assert(cache->associativity < cache->sets); 271 *ebx = (cache->line_size - 1) | 272 ((cache->partitions - 1) << 12) | 273 ((cache->associativity - 1) << 22); 274 275 assert(cache->sets > 0); 276 *ecx = cache->sets - 1; 277 278 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 279 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 280 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 281 } 282 283 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 284 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 285 { 286 assert(cache->size % 1024 == 0); 287 assert(cache->lines_per_tag > 0); 288 assert(cache->associativity > 0); 289 assert(cache->line_size > 0); 290 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 291 (cache->lines_per_tag << 8) | (cache->line_size); 292 } 293 294 #define ASSOC_FULL 0xFF 295 296 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 297 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 298 a == 2 ? 0x2 : \ 299 a == 4 ? 
0x4 : \ 300 a == 8 ? 0x6 : \ 301 a == 16 ? 0x8 : \ 302 a == 32 ? 0xA : \ 303 a == 48 ? 0xB : \ 304 a == 64 ? 0xC : \ 305 a == 96 ? 0xD : \ 306 a == 128 ? 0xE : \ 307 a == ASSOC_FULL ? 0xF : \ 308 0 /* invalid value */) 309 310 /* 311 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 312 * @l3 can be NULL. 313 */ 314 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 315 CPUCacheInfo *l3, 316 uint32_t *ecx, uint32_t *edx) 317 { 318 assert(l2->size % 1024 == 0); 319 assert(l2->associativity > 0); 320 assert(l2->lines_per_tag > 0); 321 assert(l2->line_size > 0); 322 *ecx = ((l2->size / 1024) << 16) | 323 (AMD_ENC_ASSOC(l2->associativity) << 12) | 324 (l2->lines_per_tag << 8) | (l2->line_size); 325 326 if (l3) { 327 assert(l3->size % (512 * 1024) == 0); 328 assert(l3->associativity > 0); 329 assert(l3->lines_per_tag > 0); 330 assert(l3->line_size > 0); 331 *edx = ((l3->size / (512 * 1024)) << 18) | 332 (AMD_ENC_ASSOC(l3->associativity) << 12) | 333 (l3->lines_per_tag << 8) | (l3->line_size); 334 } else { 335 *edx = 0; 336 } 337 } 338 339 /* 340 * Definitions of the hardcoded cache entries we expose: 341 * These are legacy cache values. 
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DCACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DCACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = ICACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = ICACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
Single-Range Output scheme; 490 */ 491 #define INTEL_PT_MINIMAL_ECX 0x7 492 /* generated packets which contain IP payloads have LIP values */ 493 #define INTEL_PT_IP_LIP (1 << 31) 494 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 495 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 496 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 497 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 498 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 499 500 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 501 uint32_t vendor2, uint32_t vendor3) 502 { 503 int i; 504 for (i = 0; i < 4; i++) { 505 dst[i] = vendor1 >> (8 * i); 506 dst[i + 4] = vendor2 >> (8 * i); 507 dst[i + 8] = vendor3 >> (8 * i); 508 } 509 dst[CPUID_VENDOR_SZ] = '\0'; 510 } 511 512 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 513 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 514 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 515 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 516 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 517 CPUID_PSE36 | CPUID_FXSR) 518 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 519 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 520 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 521 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 522 CPUID_PAE | CPUID_SEP | CPUID_APIC) 523 524 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 525 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 526 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 527 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 528 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 529 /* partly implemented: 530 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 531 /* missing: 532 CPUID_VME, 
CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 533 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 534 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 535 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 536 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 537 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR) 538 /* missing: 539 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 540 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 541 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 542 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 543 CPUID_EXT_F16C, CPUID_EXT_RDRAND */ 544 545 #ifdef TARGET_X86_64 546 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 547 #else 548 #define TCG_EXT2_X86_64_FEATURES 0 549 #endif 550 551 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 552 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 553 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 554 TCG_EXT2_X86_64_FEATURES) 555 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 556 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 557 #define TCG_EXT4_FEATURES 0 558 #define TCG_SVM_FEATURES 0 559 #define TCG_KVM_FEATURES 0 560 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 561 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 562 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 563 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 564 CPUID_7_0_EBX_ERMS) 565 /* missing: 566 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 567 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 568 CPUID_7_0_EBX_RDSEED */ 569 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \ 570 CPUID_7_0_ECX_LA57) 571 #define TCG_7_0_EDX_FEATURES 0 572 #define TCG_APM_FEATURES 0 573 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 574 #define TCG_XSAVE_FEATURES 
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

/* Describes one 32-bit CPUID feature word: the names QEMU exposes for its
 * bits, the CPUID leaf/register it comes from, and migration/TCG masks. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        /* VIA/Centaur PadLock feature bits */
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        /* NOTE: "kvmclock" intentionally appears at both bit 0 and bit 3,
         * matching the two clock feature bits in KVM_CPUID_FEATURES */
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean",  "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000008,
        .cpuid_reg = R_EBX,
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/* Describes one XSAVE state component: the CPUID feature that enables it,
 * and its offset/size within the standard-format XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
sizeof(X86XSaveHeader), 935 }, 936 [XSTATE_YMM_BIT] = 937 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 938 .offset = offsetof(X86XSaveArea, avx_state), 939 .size = sizeof(XSaveAVX) }, 940 [XSTATE_BNDREGS_BIT] = 941 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 942 .offset = offsetof(X86XSaveArea, bndreg_state), 943 .size = sizeof(XSaveBNDREG) }, 944 [XSTATE_BNDCSR_BIT] = 945 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 946 .offset = offsetof(X86XSaveArea, bndcsr_state), 947 .size = sizeof(XSaveBNDCSR) }, 948 [XSTATE_OPMASK_BIT] = 949 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 950 .offset = offsetof(X86XSaveArea, opmask_state), 951 .size = sizeof(XSaveOpmask) }, 952 [XSTATE_ZMM_Hi256_BIT] = 953 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 954 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 955 .size = sizeof(XSaveZMM_Hi256) }, 956 [XSTATE_Hi16_ZMM_BIT] = 957 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 958 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 959 .size = sizeof(XSaveHi16_ZMM) }, 960 [XSTATE_PKRU_BIT] = 961 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 962 .offset = offsetof(X86XSaveArea, pkru_state), 963 .size = sizeof(XSavePKRU) }, 964 }; 965 966 static uint32_t xsave_area_size(uint64_t mask) 967 { 968 int i; 969 uint64_t ret = 0; 970 971 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 972 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 973 if ((mask >> i) & 1) { 974 ret = MAX(ret, esa->offset + esa->size); 975 } 976 } 977 return ret; 978 } 979 980 static inline bool accel_uses_host_cpuid(void) 981 { 982 return kvm_enabled() || hvf_enabled(); 983 } 984 985 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 986 { 987 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 988 cpu->env.features[FEAT_XSAVE_COMP_LO]; 989 } 990 991 const char *get_register_name_32(unsigned int reg) 992 { 993 if (reg >= CPU_NB_REGS32) { 994 return NULL; 995 } 996 return 
x86_reg_info_32[reg].name; 997 } 998 999 /* 1000 * Returns the set of feature flags that are supported and migratable by 1001 * QEMU, for a given FeatureWord. 1002 */ 1003 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w) 1004 { 1005 FeatureWordInfo *wi = &feature_word_info[w]; 1006 uint32_t r = 0; 1007 int i; 1008 1009 for (i = 0; i < 32; i++) { 1010 uint32_t f = 1U << i; 1011 1012 /* If the feature name is known, it is implicitly considered migratable, 1013 * unless it is explicitly set in unmigratable_flags */ 1014 if ((wi->migratable_flags & f) || 1015 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1016 r |= f; 1017 } 1018 } 1019 return r; 1020 } 1021 1022 void host_cpuid(uint32_t function, uint32_t count, 1023 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1024 { 1025 uint32_t vec[4]; 1026 1027 #ifdef __x86_64__ 1028 asm volatile("cpuid" 1029 : "=a"(vec[0]), "=b"(vec[1]), 1030 "=c"(vec[2]), "=d"(vec[3]) 1031 : "0"(function), "c"(count) : "cc"); 1032 #elif defined(__i386__) 1033 asm volatile("pusha \n\t" 1034 "cpuid \n\t" 1035 "mov %%eax, 0(%2) \n\t" 1036 "mov %%ebx, 4(%2) \n\t" 1037 "mov %%ecx, 8(%2) \n\t" 1038 "mov %%edx, 12(%2) \n\t" 1039 "popa" 1040 : : "a"(function), "c"(count), "S"(vec) 1041 : "memory", "cc"); 1042 #else 1043 abort(); 1044 #endif 1045 1046 if (eax) 1047 *eax = vec[0]; 1048 if (ebx) 1049 *ebx = vec[1]; 1050 if (ecx) 1051 *ecx = vec[2]; 1052 if (edx) 1053 *edx = vec[3]; 1054 } 1055 1056 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1057 { 1058 uint32_t eax, ebx, ecx, edx; 1059 1060 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1061 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1062 1063 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1064 if (family) { 1065 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1066 } 1067 if (model) { 1068 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1069 } 1070 if (stepping) { 1071 *stepping = eax & 0x0F; 1072 } 1073 } 1074 1075 /* CPU 
class name definitions: */ 1076 1077 /* Return type name for a given CPU model name 1078 * Caller is responsible for freeing the returned string. 1079 */ 1080 static char *x86_cpu_type_name(const char *model_name) 1081 { 1082 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1083 } 1084 1085 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1086 { 1087 ObjectClass *oc; 1088 char *typename = x86_cpu_type_name(cpu_model); 1089 oc = object_class_by_name(typename); 1090 g_free(typename); 1091 return oc; 1092 } 1093 1094 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1095 { 1096 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1097 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1098 return g_strndup(class_name, 1099 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1100 } 1101 1102 struct X86CPUDefinition { 1103 const char *name; 1104 uint32_t level; 1105 uint32_t xlevel; 1106 /* vendor is zero-terminated, 12 character ASCII string */ 1107 char vendor[CPUID_VENDOR_SZ + 1]; 1108 int family; 1109 int model; 1110 int stepping; 1111 FeatureWordArray features; 1112 const char *model_id; 1113 CPUCaches *cache_info; 1114 }; 1115 1116 static CPUCaches epyc_cache_info = { 1117 .l1d_cache = { 1118 .type = DCACHE, 1119 .level = 1, 1120 .size = 32 * KiB, 1121 .line_size = 64, 1122 .associativity = 8, 1123 .partitions = 1, 1124 .sets = 64, 1125 .lines_per_tag = 1, 1126 .self_init = 1, 1127 .no_invd_sharing = true, 1128 }, 1129 .l1i_cache = { 1130 .type = ICACHE, 1131 .level = 1, 1132 .size = 64 * KiB, 1133 .line_size = 64, 1134 .associativity = 4, 1135 .partitions = 1, 1136 .sets = 256, 1137 .lines_per_tag = 1, 1138 .self_init = 1, 1139 .no_invd_sharing = true, 1140 }, 1141 .l2_cache = { 1142 .type = UNIFIED_CACHE, 1143 .level = 2, 1144 .size = 512 * KiB, 1145 .line_size = 64, 1146 .associativity = 8, 1147 .partitions = 1, 1148 .sets = 1024, 1149 .lines_per_tag = 1, 1150 }, 1151 .l3_cache = { 1152 .type = 
UNIFIED_CACHE, 1153 .level = 3, 1154 .size = 8 * MiB, 1155 .line_size = 64, 1156 .associativity = 16, 1157 .partitions = 1, 1158 .sets = 8192, 1159 .lines_per_tag = 1, 1160 .self_init = true, 1161 .inclusive = true, 1162 .complex_indexing = true, 1163 }, 1164 }; 1165 1166 static X86CPUDefinition builtin_x86_defs[] = { 1167 { 1168 .name = "qemu64", 1169 .level = 0xd, 1170 .vendor = CPUID_VENDOR_AMD, 1171 .family = 6, 1172 .model = 6, 1173 .stepping = 3, 1174 .features[FEAT_1_EDX] = 1175 PPRO_FEATURES | 1176 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1177 CPUID_PSE36, 1178 .features[FEAT_1_ECX] = 1179 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1180 .features[FEAT_8000_0001_EDX] = 1181 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1182 .features[FEAT_8000_0001_ECX] = 1183 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1184 .xlevel = 0x8000000A, 1185 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1186 }, 1187 { 1188 .name = "phenom", 1189 .level = 5, 1190 .vendor = CPUID_VENDOR_AMD, 1191 .family = 16, 1192 .model = 2, 1193 .stepping = 3, 1194 /* Missing: CPUID_HT */ 1195 .features[FEAT_1_EDX] = 1196 PPRO_FEATURES | 1197 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1198 CPUID_PSE36 | CPUID_VME, 1199 .features[FEAT_1_ECX] = 1200 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1201 CPUID_EXT_POPCNT, 1202 .features[FEAT_8000_0001_EDX] = 1203 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1204 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1205 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1206 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1207 CPUID_EXT3_CR8LEG, 1208 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1209 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1210 .features[FEAT_8000_0001_ECX] = 1211 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1212 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1213 /* Missing: CPUID_SVM_LBRV */ 1214 .features[FEAT_SVM] = 1215 CPUID_SVM_NPT, 1216 .xlevel = 0x8000001A, 1217 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 
1218 }, 1219 { 1220 .name = "core2duo", 1221 .level = 10, 1222 .vendor = CPUID_VENDOR_INTEL, 1223 .family = 6, 1224 .model = 15, 1225 .stepping = 11, 1226 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1227 .features[FEAT_1_EDX] = 1228 PPRO_FEATURES | 1229 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1230 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1231 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1232 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1233 .features[FEAT_1_ECX] = 1234 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1235 CPUID_EXT_CX16, 1236 .features[FEAT_8000_0001_EDX] = 1237 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1238 .features[FEAT_8000_0001_ECX] = 1239 CPUID_EXT3_LAHF_LM, 1240 .xlevel = 0x80000008, 1241 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1242 }, 1243 { 1244 .name = "kvm64", 1245 .level = 0xd, 1246 .vendor = CPUID_VENDOR_INTEL, 1247 .family = 15, 1248 .model = 6, 1249 .stepping = 1, 1250 /* Missing: CPUID_HT */ 1251 .features[FEAT_1_EDX] = 1252 PPRO_FEATURES | CPUID_VME | 1253 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1254 CPUID_PSE36, 1255 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1256 .features[FEAT_1_ECX] = 1257 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1258 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1259 .features[FEAT_8000_0001_EDX] = 1260 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1261 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1262 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1263 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1264 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1265 .features[FEAT_8000_0001_ECX] = 1266 0, 1267 .xlevel = 0x80000008, 1268 .model_id = "Common KVM processor" 1269 }, 1270 { 1271 .name = "qemu32", 1272 .level = 4, 1273 .vendor = CPUID_VENDOR_INTEL, 1274 .family = 6, 1275 .model = 6, 1276 .stepping = 3, 1277 .features[FEAT_1_EDX] = 1278 PPRO_FEATURES, 1279 
.features[FEAT_1_ECX] = 1280 CPUID_EXT_SSE3, 1281 .xlevel = 0x80000004, 1282 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1283 }, 1284 { 1285 .name = "kvm32", 1286 .level = 5, 1287 .vendor = CPUID_VENDOR_INTEL, 1288 .family = 15, 1289 .model = 6, 1290 .stepping = 1, 1291 .features[FEAT_1_EDX] = 1292 PPRO_FEATURES | CPUID_VME | 1293 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1294 .features[FEAT_1_ECX] = 1295 CPUID_EXT_SSE3, 1296 .features[FEAT_8000_0001_ECX] = 1297 0, 1298 .xlevel = 0x80000008, 1299 .model_id = "Common 32-bit KVM processor" 1300 }, 1301 { 1302 .name = "coreduo", 1303 .level = 10, 1304 .vendor = CPUID_VENDOR_INTEL, 1305 .family = 6, 1306 .model = 14, 1307 .stepping = 8, 1308 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1309 .features[FEAT_1_EDX] = 1310 PPRO_FEATURES | CPUID_VME | 1311 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1312 CPUID_SS, 1313 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1314 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1315 .features[FEAT_1_ECX] = 1316 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1317 .features[FEAT_8000_0001_EDX] = 1318 CPUID_EXT2_NX, 1319 .xlevel = 0x80000008, 1320 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1321 }, 1322 { 1323 .name = "486", 1324 .level = 1, 1325 .vendor = CPUID_VENDOR_INTEL, 1326 .family = 4, 1327 .model = 8, 1328 .stepping = 0, 1329 .features[FEAT_1_EDX] = 1330 I486_FEATURES, 1331 .xlevel = 0, 1332 .model_id = "", 1333 }, 1334 { 1335 .name = "pentium", 1336 .level = 1, 1337 .vendor = CPUID_VENDOR_INTEL, 1338 .family = 5, 1339 .model = 4, 1340 .stepping = 3, 1341 .features[FEAT_1_EDX] = 1342 PENTIUM_FEATURES, 1343 .xlevel = 0, 1344 .model_id = "", 1345 }, 1346 { 1347 .name = "pentium2", 1348 .level = 2, 1349 .vendor = CPUID_VENDOR_INTEL, 1350 .family = 6, 1351 .model = 5, 1352 .stepping = 2, 1353 .features[FEAT_1_EDX] = 1354 PENTIUM2_FEATURES, 1355 .xlevel = 0, 1356 .model_id = "", 1357 }, 1358 { 1359 .name = "pentium3", 1360 .level = 3, 1361 
.vendor = CPUID_VENDOR_INTEL, 1362 .family = 6, 1363 .model = 7, 1364 .stepping = 3, 1365 .features[FEAT_1_EDX] = 1366 PENTIUM3_FEATURES, 1367 .xlevel = 0, 1368 .model_id = "", 1369 }, 1370 { 1371 .name = "athlon", 1372 .level = 2, 1373 .vendor = CPUID_VENDOR_AMD, 1374 .family = 6, 1375 .model = 2, 1376 .stepping = 3, 1377 .features[FEAT_1_EDX] = 1378 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1379 CPUID_MCA, 1380 .features[FEAT_8000_0001_EDX] = 1381 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1382 .xlevel = 0x80000008, 1383 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1384 }, 1385 { 1386 .name = "n270", 1387 .level = 10, 1388 .vendor = CPUID_VENDOR_INTEL, 1389 .family = 6, 1390 .model = 28, 1391 .stepping = 2, 1392 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1393 .features[FEAT_1_EDX] = 1394 PPRO_FEATURES | 1395 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1396 CPUID_ACPI | CPUID_SS, 1397 /* Some CPUs got no CPUID_SEP */ 1398 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1399 * CPUID_EXT_XTPR */ 1400 .features[FEAT_1_ECX] = 1401 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1402 CPUID_EXT_MOVBE, 1403 .features[FEAT_8000_0001_EDX] = 1404 CPUID_EXT2_NX, 1405 .features[FEAT_8000_0001_ECX] = 1406 CPUID_EXT3_LAHF_LM, 1407 .xlevel = 0x80000008, 1408 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1409 }, 1410 { 1411 .name = "Conroe", 1412 .level = 10, 1413 .vendor = CPUID_VENDOR_INTEL, 1414 .family = 6, 1415 .model = 15, 1416 .stepping = 3, 1417 .features[FEAT_1_EDX] = 1418 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1419 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1420 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1421 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1422 CPUID_DE | CPUID_FP87, 1423 .features[FEAT_1_ECX] = 1424 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1425 .features[FEAT_8000_0001_EDX] = 1426 CPUID_EXT2_LM | 
CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1427 .features[FEAT_8000_0001_ECX] = 1428 CPUID_EXT3_LAHF_LM, 1429 .xlevel = 0x80000008, 1430 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1431 }, 1432 { 1433 .name = "Penryn", 1434 .level = 10, 1435 .vendor = CPUID_VENDOR_INTEL, 1436 .family = 6, 1437 .model = 23, 1438 .stepping = 3, 1439 .features[FEAT_1_EDX] = 1440 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1441 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1442 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1443 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1444 CPUID_DE | CPUID_FP87, 1445 .features[FEAT_1_ECX] = 1446 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1447 CPUID_EXT_SSE3, 1448 .features[FEAT_8000_0001_EDX] = 1449 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1450 .features[FEAT_8000_0001_ECX] = 1451 CPUID_EXT3_LAHF_LM, 1452 .xlevel = 0x80000008, 1453 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1454 }, 1455 { 1456 .name = "Nehalem", 1457 .level = 11, 1458 .vendor = CPUID_VENDOR_INTEL, 1459 .family = 6, 1460 .model = 26, 1461 .stepping = 3, 1462 .features[FEAT_1_EDX] = 1463 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1464 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1465 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1466 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1467 CPUID_DE | CPUID_FP87, 1468 .features[FEAT_1_ECX] = 1469 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1470 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1471 .features[FEAT_8000_0001_EDX] = 1472 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1473 .features[FEAT_8000_0001_ECX] = 1474 CPUID_EXT3_LAHF_LM, 1475 .xlevel = 0x80000008, 1476 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1477 }, 1478 { 1479 .name = "Nehalem-IBRS", 1480 .level = 11, 1481 .vendor = CPUID_VENDOR_INTEL, 1482 .family = 
6, 1483 .model = 26, 1484 .stepping = 3, 1485 .features[FEAT_1_EDX] = 1486 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1487 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1488 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1489 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1490 CPUID_DE | CPUID_FP87, 1491 .features[FEAT_1_ECX] = 1492 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1493 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1494 .features[FEAT_7_0_EDX] = 1495 CPUID_7_0_EDX_SPEC_CTRL, 1496 .features[FEAT_8000_0001_EDX] = 1497 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1498 .features[FEAT_8000_0001_ECX] = 1499 CPUID_EXT3_LAHF_LM, 1500 .xlevel = 0x80000008, 1501 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)", 1502 }, 1503 { 1504 .name = "Westmere", 1505 .level = 11, 1506 .vendor = CPUID_VENDOR_INTEL, 1507 .family = 6, 1508 .model = 44, 1509 .stepping = 1, 1510 .features[FEAT_1_EDX] = 1511 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1512 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1513 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1514 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1515 CPUID_DE | CPUID_FP87, 1516 .features[FEAT_1_ECX] = 1517 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1518 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1519 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1520 .features[FEAT_8000_0001_EDX] = 1521 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1522 .features[FEAT_8000_0001_ECX] = 1523 CPUID_EXT3_LAHF_LM, 1524 .features[FEAT_6_EAX] = 1525 CPUID_6_EAX_ARAT, 1526 .xlevel = 0x80000008, 1527 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1528 }, 1529 { 1530 .name = "Westmere-IBRS", 1531 .level = 11, 1532 .vendor = CPUID_VENDOR_INTEL, 1533 .family = 6, 1534 .model = 44, 1535 .stepping = 1, 1536 .features[FEAT_1_EDX] = 1537 CPUID_VME | CPUID_SSE2 | 
CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1538 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1539 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1540 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1541 CPUID_DE | CPUID_FP87, 1542 .features[FEAT_1_ECX] = 1543 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1544 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1545 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1546 .features[FEAT_8000_0001_EDX] = 1547 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1548 .features[FEAT_8000_0001_ECX] = 1549 CPUID_EXT3_LAHF_LM, 1550 .features[FEAT_7_0_EDX] = 1551 CPUID_7_0_EDX_SPEC_CTRL, 1552 .features[FEAT_6_EAX] = 1553 CPUID_6_EAX_ARAT, 1554 .xlevel = 0x80000008, 1555 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)", 1556 }, 1557 { 1558 .name = "SandyBridge", 1559 .level = 0xd, 1560 .vendor = CPUID_VENDOR_INTEL, 1561 .family = 6, 1562 .model = 42, 1563 .stepping = 1, 1564 .features[FEAT_1_EDX] = 1565 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1566 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1567 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1568 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1569 CPUID_DE | CPUID_FP87, 1570 .features[FEAT_1_ECX] = 1571 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1572 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1573 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1574 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1575 CPUID_EXT_SSE3, 1576 .features[FEAT_8000_0001_EDX] = 1577 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1578 CPUID_EXT2_SYSCALL, 1579 .features[FEAT_8000_0001_ECX] = 1580 CPUID_EXT3_LAHF_LM, 1581 .features[FEAT_XSAVE] = 1582 CPUID_XSAVE_XSAVEOPT, 1583 .features[FEAT_6_EAX] = 1584 CPUID_6_EAX_ARAT, 1585 .xlevel = 0x80000008, 1586 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1587 }, 1588 { 1589 .name = "SandyBridge-IBRS", 1590 
.level = 0xd, 1591 .vendor = CPUID_VENDOR_INTEL, 1592 .family = 6, 1593 .model = 42, 1594 .stepping = 1, 1595 .features[FEAT_1_EDX] = 1596 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1597 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1598 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1599 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1600 CPUID_DE | CPUID_FP87, 1601 .features[FEAT_1_ECX] = 1602 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1603 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1604 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1605 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1606 CPUID_EXT_SSE3, 1607 .features[FEAT_8000_0001_EDX] = 1608 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1609 CPUID_EXT2_SYSCALL, 1610 .features[FEAT_8000_0001_ECX] = 1611 CPUID_EXT3_LAHF_LM, 1612 .features[FEAT_7_0_EDX] = 1613 CPUID_7_0_EDX_SPEC_CTRL, 1614 .features[FEAT_XSAVE] = 1615 CPUID_XSAVE_XSAVEOPT, 1616 .features[FEAT_6_EAX] = 1617 CPUID_6_EAX_ARAT, 1618 .xlevel = 0x80000008, 1619 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)", 1620 }, 1621 { 1622 .name = "IvyBridge", 1623 .level = 0xd, 1624 .vendor = CPUID_VENDOR_INTEL, 1625 .family = 6, 1626 .model = 58, 1627 .stepping = 9, 1628 .features[FEAT_1_EDX] = 1629 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1630 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1631 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1632 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1633 CPUID_DE | CPUID_FP87, 1634 .features[FEAT_1_ECX] = 1635 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1636 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1637 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1638 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1639 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1640 .features[FEAT_7_0_EBX] = 1641 
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1642 CPUID_7_0_EBX_ERMS, 1643 .features[FEAT_8000_0001_EDX] = 1644 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1645 CPUID_EXT2_SYSCALL, 1646 .features[FEAT_8000_0001_ECX] = 1647 CPUID_EXT3_LAHF_LM, 1648 .features[FEAT_XSAVE] = 1649 CPUID_XSAVE_XSAVEOPT, 1650 .features[FEAT_6_EAX] = 1651 CPUID_6_EAX_ARAT, 1652 .xlevel = 0x80000008, 1653 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1654 }, 1655 { 1656 .name = "IvyBridge-IBRS", 1657 .level = 0xd, 1658 .vendor = CPUID_VENDOR_INTEL, 1659 .family = 6, 1660 .model = 58, 1661 .stepping = 9, 1662 .features[FEAT_1_EDX] = 1663 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1664 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1665 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1666 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1667 CPUID_DE | CPUID_FP87, 1668 .features[FEAT_1_ECX] = 1669 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1670 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1671 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1672 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1673 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1674 .features[FEAT_7_0_EBX] = 1675 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1676 CPUID_7_0_EBX_ERMS, 1677 .features[FEAT_8000_0001_EDX] = 1678 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1679 CPUID_EXT2_SYSCALL, 1680 .features[FEAT_8000_0001_ECX] = 1681 CPUID_EXT3_LAHF_LM, 1682 .features[FEAT_7_0_EDX] = 1683 CPUID_7_0_EDX_SPEC_CTRL, 1684 .features[FEAT_XSAVE] = 1685 CPUID_XSAVE_XSAVEOPT, 1686 .features[FEAT_6_EAX] = 1687 CPUID_6_EAX_ARAT, 1688 .xlevel = 0x80000008, 1689 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)", 1690 }, 1691 { 1692 .name = "Haswell-noTSX", 1693 .level = 0xd, 1694 .vendor = CPUID_VENDOR_INTEL, 1695 .family = 6, 1696 .model = 60, 1697 .stepping = 1, 1698 .features[FEAT_1_EDX] = 1699 CPUID_VME | 
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1700 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1701 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1702 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1703 CPUID_DE | CPUID_FP87, 1704 .features[FEAT_1_ECX] = 1705 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1706 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1707 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1708 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1709 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1710 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1711 .features[FEAT_8000_0001_EDX] = 1712 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1713 CPUID_EXT2_SYSCALL, 1714 .features[FEAT_8000_0001_ECX] = 1715 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1716 .features[FEAT_7_0_EBX] = 1717 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1718 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1719 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 1720 .features[FEAT_XSAVE] = 1721 CPUID_XSAVE_XSAVEOPT, 1722 .features[FEAT_6_EAX] = 1723 CPUID_6_EAX_ARAT, 1724 .xlevel = 0x80000008, 1725 .model_id = "Intel Core Processor (Haswell, no TSX)", 1726 }, 1727 { 1728 .name = "Haswell-noTSX-IBRS", 1729 .level = 0xd, 1730 .vendor = CPUID_VENDOR_INTEL, 1731 .family = 6, 1732 .model = 60, 1733 .stepping = 1, 1734 .features[FEAT_1_EDX] = 1735 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1736 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1737 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1738 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1739 CPUID_DE | CPUID_FP87, 1740 .features[FEAT_1_ECX] = 1741 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1742 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1743 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1744 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1745 
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1746 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1747 .features[FEAT_8000_0001_EDX] = 1748 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1749 CPUID_EXT2_SYSCALL, 1750 .features[FEAT_8000_0001_ECX] = 1751 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1752 .features[FEAT_7_0_EDX] = 1753 CPUID_7_0_EDX_SPEC_CTRL, 1754 .features[FEAT_7_0_EBX] = 1755 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1756 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1757 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 1758 .features[FEAT_XSAVE] = 1759 CPUID_XSAVE_XSAVEOPT, 1760 .features[FEAT_6_EAX] = 1761 CPUID_6_EAX_ARAT, 1762 .xlevel = 0x80000008, 1763 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)", 1764 }, 1765 { 1766 .name = "Haswell", 1767 .level = 0xd, 1768 .vendor = CPUID_VENDOR_INTEL, 1769 .family = 6, 1770 .model = 60, 1771 .stepping = 4, 1772 .features[FEAT_1_EDX] = 1773 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1774 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1775 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1776 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1777 CPUID_DE | CPUID_FP87, 1778 .features[FEAT_1_ECX] = 1779 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1780 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1781 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1782 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1783 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1784 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1785 .features[FEAT_8000_0001_EDX] = 1786 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1787 CPUID_EXT2_SYSCALL, 1788 .features[FEAT_8000_0001_ECX] = 1789 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1790 .features[FEAT_7_0_EBX] = 1791 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1792 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1793 
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1794 CPUID_7_0_EBX_RTM, 1795 .features[FEAT_XSAVE] = 1796 CPUID_XSAVE_XSAVEOPT, 1797 .features[FEAT_6_EAX] = 1798 CPUID_6_EAX_ARAT, 1799 .xlevel = 0x80000008, 1800 .model_id = "Intel Core Processor (Haswell)", 1801 }, 1802 { 1803 .name = "Haswell-IBRS", 1804 .level = 0xd, 1805 .vendor = CPUID_VENDOR_INTEL, 1806 .family = 6, 1807 .model = 60, 1808 .stepping = 4, 1809 .features[FEAT_1_EDX] = 1810 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1811 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1812 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1813 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1814 CPUID_DE | CPUID_FP87, 1815 .features[FEAT_1_ECX] = 1816 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1817 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1818 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1819 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1820 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1821 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1822 .features[FEAT_8000_0001_EDX] = 1823 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1824 CPUID_EXT2_SYSCALL, 1825 .features[FEAT_8000_0001_ECX] = 1826 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1827 .features[FEAT_7_0_EDX] = 1828 CPUID_7_0_EDX_SPEC_CTRL, 1829 .features[FEAT_7_0_EBX] = 1830 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1831 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1832 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1833 CPUID_7_0_EBX_RTM, 1834 .features[FEAT_XSAVE] = 1835 CPUID_XSAVE_XSAVEOPT, 1836 .features[FEAT_6_EAX] = 1837 CPUID_6_EAX_ARAT, 1838 .xlevel = 0x80000008, 1839 .model_id = "Intel Core Processor (Haswell, IBRS)", 1840 }, 1841 { 1842 .name = "Broadwell-noTSX", 1843 .level = 0xd, 1844 .vendor = CPUID_VENDOR_INTEL, 1845 .family = 6, 1846 .model = 61, 1847 .stepping = 2, 1848 
.features[FEAT_1_EDX] = 1849 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1850 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1851 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1852 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1853 CPUID_DE | CPUID_FP87, 1854 .features[FEAT_1_ECX] = 1855 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1856 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1857 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1858 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1859 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1860 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1861 .features[FEAT_8000_0001_EDX] = 1862 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1863 CPUID_EXT2_SYSCALL, 1864 .features[FEAT_8000_0001_ECX] = 1865 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1866 .features[FEAT_7_0_EBX] = 1867 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1868 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1869 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1870 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1871 CPUID_7_0_EBX_SMAP, 1872 .features[FEAT_XSAVE] = 1873 CPUID_XSAVE_XSAVEOPT, 1874 .features[FEAT_6_EAX] = 1875 CPUID_6_EAX_ARAT, 1876 .xlevel = 0x80000008, 1877 .model_id = "Intel Core Processor (Broadwell, no TSX)", 1878 }, 1879 { 1880 .name = "Broadwell-noTSX-IBRS", 1881 .level = 0xd, 1882 .vendor = CPUID_VENDOR_INTEL, 1883 .family = 6, 1884 .model = 61, 1885 .stepping = 2, 1886 .features[FEAT_1_EDX] = 1887 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1888 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1889 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1890 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1891 CPUID_DE | CPUID_FP87, 1892 .features[FEAT_1_ECX] = 1893 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1894 CPUID_EXT_POPCNT | 
CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1895 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1896 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1897 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1898 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1899 .features[FEAT_8000_0001_EDX] = 1900 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1901 CPUID_EXT2_SYSCALL, 1902 .features[FEAT_8000_0001_ECX] = 1903 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1904 .features[FEAT_7_0_EDX] = 1905 CPUID_7_0_EDX_SPEC_CTRL, 1906 .features[FEAT_7_0_EBX] = 1907 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1908 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1909 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1910 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1911 CPUID_7_0_EBX_SMAP, 1912 .features[FEAT_XSAVE] = 1913 CPUID_XSAVE_XSAVEOPT, 1914 .features[FEAT_6_EAX] = 1915 CPUID_6_EAX_ARAT, 1916 .xlevel = 0x80000008, 1917 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)", 1918 }, 1919 { 1920 .name = "Broadwell", 1921 .level = 0xd, 1922 .vendor = CPUID_VENDOR_INTEL, 1923 .family = 6, 1924 .model = 61, 1925 .stepping = 2, 1926 .features[FEAT_1_EDX] = 1927 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1928 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1929 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1930 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1931 CPUID_DE | CPUID_FP87, 1932 .features[FEAT_1_ECX] = 1933 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1934 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1935 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1936 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1937 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1938 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1939 .features[FEAT_8000_0001_EDX] = 1940 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1941 CPUID_EXT2_SYSCALL, 
1942 .features[FEAT_8000_0001_ECX] = 1943 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1944 .features[FEAT_7_0_EBX] = 1945 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1946 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1947 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1948 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1949 CPUID_7_0_EBX_SMAP, 1950 .features[FEAT_XSAVE] = 1951 CPUID_XSAVE_XSAVEOPT, 1952 .features[FEAT_6_EAX] = 1953 CPUID_6_EAX_ARAT, 1954 .xlevel = 0x80000008, 1955 .model_id = "Intel Core Processor (Broadwell)", 1956 }, 1957 { 1958 .name = "Broadwell-IBRS", 1959 .level = 0xd, 1960 .vendor = CPUID_VENDOR_INTEL, 1961 .family = 6, 1962 .model = 61, 1963 .stepping = 2, 1964 .features[FEAT_1_EDX] = 1965 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1966 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1967 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1968 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1969 CPUID_DE | CPUID_FP87, 1970 .features[FEAT_1_ECX] = 1971 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1972 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1973 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1974 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1975 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1976 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1977 .features[FEAT_8000_0001_EDX] = 1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1979 CPUID_EXT2_SYSCALL, 1980 .features[FEAT_8000_0001_ECX] = 1981 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 1982 .features[FEAT_7_0_EDX] = 1983 CPUID_7_0_EDX_SPEC_CTRL, 1984 .features[FEAT_7_0_EBX] = 1985 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1986 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1987 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1988 CPUID_7_0_EBX_RTM | 
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 1989 CPUID_7_0_EBX_SMAP, 1990 .features[FEAT_XSAVE] = 1991 CPUID_XSAVE_XSAVEOPT, 1992 .features[FEAT_6_EAX] = 1993 CPUID_6_EAX_ARAT, 1994 .xlevel = 0x80000008, 1995 .model_id = "Intel Core Processor (Broadwell, IBRS)", 1996 }, 1997 { 1998 .name = "Skylake-Client", 1999 .level = 0xd, 2000 .vendor = CPUID_VENDOR_INTEL, 2001 .family = 6, 2002 .model = 94, 2003 .stepping = 3, 2004 .features[FEAT_1_EDX] = 2005 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2006 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2007 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2008 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2009 CPUID_DE | CPUID_FP87, 2010 .features[FEAT_1_ECX] = 2011 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2012 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2013 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2014 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2015 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2016 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2017 .features[FEAT_8000_0001_EDX] = 2018 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2019 CPUID_EXT2_SYSCALL, 2020 .features[FEAT_8000_0001_ECX] = 2021 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2022 .features[FEAT_7_0_EBX] = 2023 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2024 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2025 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2026 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2027 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, 2028 /* Missing: XSAVES (not supported by some Linux versions, 2029 * including v4.1 to v4.12). 2030 * KVM doesn't yet expose any XSAVES state save component, 2031 * and the only one defined in Skylake (processor tracing) 2032 * probably will block migration anyway. 
2033 */ 2034 .features[FEAT_XSAVE] = 2035 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2036 CPUID_XSAVE_XGETBV1, 2037 .features[FEAT_6_EAX] = 2038 CPUID_6_EAX_ARAT, 2039 .xlevel = 0x80000008, 2040 .model_id = "Intel Core Processor (Skylake)", 2041 }, 2042 { 2043 .name = "Skylake-Client-IBRS", 2044 .level = 0xd, 2045 .vendor = CPUID_VENDOR_INTEL, 2046 .family = 6, 2047 .model = 94, 2048 .stepping = 3, 2049 .features[FEAT_1_EDX] = 2050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2054 CPUID_DE | CPUID_FP87, 2055 .features[FEAT_1_ECX] = 2056 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2057 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2058 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2059 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2060 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2061 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2062 .features[FEAT_8000_0001_EDX] = 2063 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2064 CPUID_EXT2_SYSCALL, 2065 .features[FEAT_8000_0001_ECX] = 2066 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2067 .features[FEAT_7_0_EDX] = 2068 CPUID_7_0_EDX_SPEC_CTRL, 2069 .features[FEAT_7_0_EBX] = 2070 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2071 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2072 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2073 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2074 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, 2075 /* Missing: XSAVES (not supported by some Linux versions, 2076 * including v4.1 to v4.12). 2077 * KVM doesn't yet expose any XSAVES state save component, 2078 * and the only one defined in Skylake (processor tracing) 2079 * probably will block migration anyway. 
2080 */ 2081 .features[FEAT_XSAVE] = 2082 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2083 CPUID_XSAVE_XGETBV1, 2084 .features[FEAT_6_EAX] = 2085 CPUID_6_EAX_ARAT, 2086 .xlevel = 0x80000008, 2087 .model_id = "Intel Core Processor (Skylake, IBRS)", 2088 }, 2089 { 2090 .name = "Skylake-Server", 2091 .level = 0xd, 2092 .vendor = CPUID_VENDOR_INTEL, 2093 .family = 6, 2094 .model = 85, 2095 .stepping = 4, 2096 .features[FEAT_1_EDX] = 2097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2101 CPUID_DE | CPUID_FP87, 2102 .features[FEAT_1_ECX] = 2103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2104 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2105 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2106 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2107 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2108 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2109 .features[FEAT_8000_0001_EDX] = 2110 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2111 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2112 .features[FEAT_8000_0001_ECX] = 2113 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2114 .features[FEAT_7_0_EBX] = 2115 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2116 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2117 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2118 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2119 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 2120 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2121 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2122 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2123 /* Missing: XSAVES (not supported by some Linux versions, 2124 * including v4.1 to v4.12). 
2125 * KVM doesn't yet expose any XSAVES state save component, 2126 * and the only one defined in Skylake (processor tracing) 2127 * probably will block migration anyway. 2128 */ 2129 .features[FEAT_XSAVE] = 2130 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2131 CPUID_XSAVE_XGETBV1, 2132 .features[FEAT_6_EAX] = 2133 CPUID_6_EAX_ARAT, 2134 .xlevel = 0x80000008, 2135 .model_id = "Intel Xeon Processor (Skylake)", 2136 }, 2137 { 2138 .name = "Skylake-Server-IBRS", 2139 .level = 0xd, 2140 .vendor = CPUID_VENDOR_INTEL, 2141 .family = 6, 2142 .model = 85, 2143 .stepping = 4, 2144 .features[FEAT_1_EDX] = 2145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2149 CPUID_DE | CPUID_FP87, 2150 .features[FEAT_1_ECX] = 2151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2157 .features[FEAT_8000_0001_EDX] = 2158 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2159 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2160 .features[FEAT_8000_0001_ECX] = 2161 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2162 .features[FEAT_7_0_EDX] = 2163 CPUID_7_0_EDX_SPEC_CTRL, 2164 .features[FEAT_7_0_EBX] = 2165 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2166 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2167 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2168 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2169 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 2170 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2171 
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2172 CPUID_7_0_EBX_AVX512VL, 2173 /* Missing: XSAVES (not supported by some Linux versions, 2174 * including v4.1 to v4.12). 2175 * KVM doesn't yet expose any XSAVES state save component, 2176 * and the only one defined in Skylake (processor tracing) 2177 * probably will block migration anyway. 2178 */ 2179 .features[FEAT_XSAVE] = 2180 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2181 CPUID_XSAVE_XGETBV1, 2182 .features[FEAT_6_EAX] = 2183 CPUID_6_EAX_ARAT, 2184 .xlevel = 0x80000008, 2185 .model_id = "Intel Xeon Processor (Skylake, IBRS)", 2186 }, 2187 { 2188 .name = "KnightsMill", 2189 .level = 0xd, 2190 .vendor = CPUID_VENDOR_INTEL, 2191 .family = 6, 2192 .model = 133, 2193 .stepping = 0, 2194 .features[FEAT_1_EDX] = 2195 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2196 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2197 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2198 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2199 CPUID_PSE | CPUID_DE | CPUID_FP87, 2200 .features[FEAT_1_ECX] = 2201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2206 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2207 .features[FEAT_8000_0001_EDX] = 2208 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2209 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2210 .features[FEAT_8000_0001_ECX] = 2211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2212 .features[FEAT_7_0_EBX] = 2213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2214 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2215 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2216 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2217 
CPUID_7_0_EBX_AVX512ER, 2218 .features[FEAT_7_0_ECX] = 2219 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2220 .features[FEAT_7_0_EDX] = 2221 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2222 .features[FEAT_XSAVE] = 2223 CPUID_XSAVE_XSAVEOPT, 2224 .features[FEAT_6_EAX] = 2225 CPUID_6_EAX_ARAT, 2226 .xlevel = 0x80000008, 2227 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2228 }, 2229 { 2230 .name = "Opteron_G1", 2231 .level = 5, 2232 .vendor = CPUID_VENDOR_AMD, 2233 .family = 15, 2234 .model = 6, 2235 .stepping = 1, 2236 .features[FEAT_1_EDX] = 2237 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2238 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2239 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2240 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2241 CPUID_DE | CPUID_FP87, 2242 .features[FEAT_1_ECX] = 2243 CPUID_EXT_SSE3, 2244 .features[FEAT_8000_0001_EDX] = 2245 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2246 .xlevel = 0x80000008, 2247 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2248 }, 2249 { 2250 .name = "Opteron_G2", 2251 .level = 5, 2252 .vendor = CPUID_VENDOR_AMD, 2253 .family = 15, 2254 .model = 6, 2255 .stepping = 1, 2256 .features[FEAT_1_EDX] = 2257 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2258 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2259 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2260 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2261 CPUID_DE | CPUID_FP87, 2262 .features[FEAT_1_ECX] = 2263 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2264 /* Missing: CPUID_EXT2_RDTSCP */ 2265 .features[FEAT_8000_0001_EDX] = 2266 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2267 .features[FEAT_8000_0001_ECX] = 2268 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2269 .xlevel = 0x80000008, 2270 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2271 }, 2272 { 2273 .name = "Opteron_G3", 2274 .level = 5, 2275 .vendor 
= CPUID_VENDOR_AMD, 2276 .family = 16, 2277 .model = 2, 2278 .stepping = 3, 2279 .features[FEAT_1_EDX] = 2280 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2281 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2282 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2283 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2284 CPUID_DE | CPUID_FP87, 2285 .features[FEAT_1_ECX] = 2286 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2287 CPUID_EXT_SSE3, 2288 /* Missing: CPUID_EXT2_RDTSCP */ 2289 .features[FEAT_8000_0001_EDX] = 2290 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2291 .features[FEAT_8000_0001_ECX] = 2292 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2293 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2294 .xlevel = 0x80000008, 2295 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2296 }, 2297 { 2298 .name = "Opteron_G4", 2299 .level = 0xd, 2300 .vendor = CPUID_VENDOR_AMD, 2301 .family = 21, 2302 .model = 1, 2303 .stepping = 2, 2304 .features[FEAT_1_EDX] = 2305 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2306 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2307 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2308 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2309 CPUID_DE | CPUID_FP87, 2310 .features[FEAT_1_ECX] = 2311 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2312 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2313 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2314 CPUID_EXT_SSE3, 2315 /* Missing: CPUID_EXT2_RDTSCP */ 2316 .features[FEAT_8000_0001_EDX] = 2317 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2318 CPUID_EXT2_SYSCALL, 2319 .features[FEAT_8000_0001_ECX] = 2320 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2321 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2322 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2323 CPUID_EXT3_LAHF_LM, 2324 /* no xsaveopt! 
*/ 2325 .xlevel = 0x8000001A, 2326 .model_id = "AMD Opteron 62xx class CPU", 2327 }, 2328 { 2329 .name = "Opteron_G5", 2330 .level = 0xd, 2331 .vendor = CPUID_VENDOR_AMD, 2332 .family = 21, 2333 .model = 2, 2334 .stepping = 0, 2335 .features[FEAT_1_EDX] = 2336 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2337 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2338 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2339 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2340 CPUID_DE | CPUID_FP87, 2341 .features[FEAT_1_ECX] = 2342 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2343 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2344 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2345 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2346 /* Missing: CPUID_EXT2_RDTSCP */ 2347 .features[FEAT_8000_0001_EDX] = 2348 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2349 CPUID_EXT2_SYSCALL, 2350 .features[FEAT_8000_0001_ECX] = 2351 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2352 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2353 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2354 CPUID_EXT3_LAHF_LM, 2355 /* no xsaveopt! 
*/ 2356 .xlevel = 0x8000001A, 2357 .model_id = "AMD Opteron 63xx class CPU", 2358 }, 2359 { 2360 .name = "EPYC", 2361 .level = 0xd, 2362 .vendor = CPUID_VENDOR_AMD, 2363 .family = 23, 2364 .model = 1, 2365 .stepping = 2, 2366 .features[FEAT_1_EDX] = 2367 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2368 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2369 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2370 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2371 CPUID_VME | CPUID_FP87, 2372 .features[FEAT_1_ECX] = 2373 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2374 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2375 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2376 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2377 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2378 .features[FEAT_8000_0001_EDX] = 2379 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2380 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2381 CPUID_EXT2_SYSCALL, 2382 .features[FEAT_8000_0001_ECX] = 2383 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2384 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2385 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2386 .features[FEAT_7_0_EBX] = 2387 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2388 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2389 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2390 CPUID_7_0_EBX_SHA_NI, 2391 /* Missing: XSAVES (not supported by some Linux versions, 2392 * including v4.1 to v4.12). 2393 * KVM doesn't yet expose any XSAVES state save component. 
2394 */ 2395 .features[FEAT_XSAVE] = 2396 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2397 CPUID_XSAVE_XGETBV1, 2398 .features[FEAT_6_EAX] = 2399 CPUID_6_EAX_ARAT, 2400 .xlevel = 0x8000000A, 2401 .model_id = "AMD EPYC Processor", 2402 .cache_info = &epyc_cache_info, 2403 }, 2404 { 2405 .name = "EPYC-IBPB", 2406 .level = 0xd, 2407 .vendor = CPUID_VENDOR_AMD, 2408 .family = 23, 2409 .model = 1, 2410 .stepping = 2, 2411 .features[FEAT_1_EDX] = 2412 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2413 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2414 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2415 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2416 CPUID_VME | CPUID_FP87, 2417 .features[FEAT_1_ECX] = 2418 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2419 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2420 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2421 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2422 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2423 .features[FEAT_8000_0001_EDX] = 2424 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2425 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2426 CPUID_EXT2_SYSCALL, 2427 .features[FEAT_8000_0001_ECX] = 2428 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2429 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2430 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2431 .features[FEAT_8000_0008_EBX] = 2432 CPUID_8000_0008_EBX_IBPB, 2433 .features[FEAT_7_0_EBX] = 2434 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2435 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2436 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2437 CPUID_7_0_EBX_SHA_NI, 2438 /* Missing: XSAVES (not supported by some Linux versions, 2439 * including v4.1 to v4.12). 2440 * KVM doesn't yet expose any XSAVES state save component. 
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x8000000A,
        .model_id = "AMD EPYC Processor (with IBPB)",
        .cache_info = &epyc_cache_info,
    },
};

/* Simple (property name, property value) pair used for the accelerator
 * default tables below.  Tables are terminated by a { NULL, NULL } entry.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};


/* Change the default @value of property @prop in kvm_default_props[].
 * The property must already be present in the table; calling this for an
 * unknown property trips the assert below.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/* Return true if KVM reports support for local machine-check exceptions
 * (MCG_LMCE_P in the supported MCE capabilities).  Without CONFIG_KVM,
 * mce_cap stays 0 and this is always false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 *
 * Reads host CPUID leaves 0x80000002..0x80000004 (3 leaves x 16 bytes
 * = 48 bytes).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        /* Each leaf contributes 16 bytes, register order EAX/EBX/ECX/EDX */
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init for the "max" CPU model (all features the accelerator
 * supports on the current host).
 */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* NOTE(review): 'ordering' appears to control where the model is
     * sorted in CPU model listings -- semantics not visible in this file.
     */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}

static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);

/* Instance init for the "max" CPU model: copy vendor/family/model/stepping
 * and model-id from the host when a host-CPUID-based accelerator (KVM/HVF)
 * is in use, otherwise fall back to fixed TCG defaults.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        /* NOTE(review): host_cpudef.vendor is filled below but does not
         * appear to be read again in the visible code -- candidate for
         * removal, verify against the rest of the file.
         */
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        /* Mirror the host identification into the QOM properties */
        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            /* Minimum CPUID levels come from what the KVM kernel exposes */
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() but not KVM: query HVF instead */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed synthetic identification */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for the "host" CPU model; it derives from "max" (see the
 * TypeInfo below) and only differs in requiring host CPUID support and
 * in its description string.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

    if (kvm_enabled()) {
        xcc->model_description =
            "KVM processor with all supported host features ";
    } else if (hvf_enabled()) {
        xcc->model_description =
            "HVF processor with all supported host features ";
    }
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

/* Emit one warning per bit set in @mask, naming the CPUID leaf, register
 * and (when known) feature-name of each feature the accelerator cannot
 * provide for feature word @w.
 */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            assert(reg);
            /* Unnamed bits are reported by leaf/register/bit only */
            warn_report("%s doesn't support requested feature: "
                        "CPUID.%02XH:%s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        f->cpuid_eax, reg,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ?
                        f->feat_names[i] : "", i);
        }
    }
}

/* QOM getter for "family": CPUID family is bits 11:8 of cpuid_version;
 * when that field is 0xf, the extended family in bits 27:20 is added.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "family": values <= 0xf go in bits 11:8; larger values
 * set the base field to 0xf and store the excess in the extended-family
 * bits 27:20.  Valid range is 0..(0xff + 0xf).
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ?
                   name : "null", value, min, max);
        return;
    }

    /* Clear family (11:8) and extended-family (27:20) before re-encoding */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

/* QOM getter for "model": low nibble from cpuid_version bits 7:4,
 * high nibble from the extended-model field in bits 19:16.
 */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "model": splits an 8-bit value into the model nibble
 * (bits 7:4) and extended-model nibble (bits 19:16).  Valid range 0..0xff.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ?
                   name : "null", value, min, max);
        return;
    }

    /* Clear model (7:4) and extended-model (19:16), then re-encode */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

/* QOM getter for "stepping": cpuid_version bits 3:0 */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "stepping": stores the low nibble of cpuid_version.
 * Valid range 0..0xf.
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ?
                   name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}

/* QOM getter for "vendor": decode the three packed 32-bit vendor words
 * into a 12-character string.  Caller owns (and must g_free) the result.
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

/* QOM setter for "vendor": @value must be exactly CPUID_VENDOR_SZ (12)
 * characters; it is packed little-endian, 4 bytes per vendor word
 * (the CPUID.0 EBX/EDX/ECX layout).
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}

/* QOM getter for "model-id": unpack the 12 cpuid_model words into a
 * 48-character NUL-terminated string.  Caller owns (g_free) the result.
 */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

/* QOM setter for "model-id": pack up to 48 characters of @model_id into
 * cpuid_model, zero-padding the remainder.  NULL is treated as "".
 */
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

/* QOM getter for "tsc-frequency": reports env.tsc_khz scaled to Hz. */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "tsc-frequency": accepts a value in Hz, stores kHz. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* user_tsc_khz records that the frequency was explicitly user-set */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at an array of FEATURE_WORDS uint32_t feature words */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* QOM getter for the "hv-spinlocks" property. */
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

/* QOM setter for the "hv-spinlocks" property; Hyper-V requires a retry
 * count of at least 0xFFF.
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

static const PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Map the bit back to an XSAVE component index (HI word = bits 32+) */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first -cpu option's features become global properties */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* Bare "feat" is shorthand for "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn if the same feature appears in both +/- and key=value form */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            /* Accept metric suffixes (e.g. "2G") and convert to plain Hz */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    /* Host-passthrough models can't run at all without KVM/HVF */
    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU object so feature expansion/filtering
     * can run against the current machine and accelerator.
     */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    /* Every bit the accelerator filtered out becomes a missing feature */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}

/* Print all cpuid feature names in featureset
 */
static void listflags(FILE *f, fprintf_function print, const char **featureset)
{
    int bit;
    bool first = true;

    for (bit = 0; bit < 32; bit++) {
        if (featureset[bit]) {
            print(f, "%s%s", first ? "" : " ", featureset[bit]);
            first = false;
        }
    }
}

/* Sort alphabetically by type name, respecting X86CPUClass::ordering.
 */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    const char *name_a, *name_b;

    if (cc_a->ordering != cc_b->ordering) {
        return cc_a->ordering - cc_b->ordering;
    } else {
        name_a = object_class_get_name(class_a);
        name_b = object_class_get_name(class_b);
        return strcmp(name_a, name_b);
    }
}

static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}

/* g_slist_foreach callback: print one CPU model line for x86_cpu_list() */
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CPUListState *s = user_data;
    char *name = x86_cpu_class_get_model_name(cc);
    const char *desc = cc->model_description;
    if (!desc && cc->cpu_def) {
        /* Fall back to the model_id from the CPU definition table */
        desc = cc->cpu_def->model_id;
    }

    (*s->cpu_fprintf)(s->file, "x86 %16s  %-48s\n",
                      name, desc);
    g_free(name);
}

/* list available CPU models and flags */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    int i;
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, &s);
    g_slist_free(list);

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}

/* g_slist_foreach callback: prepend one CpuDefinitionInfo entry to the
 * list for arch_query_cpu_definitions().
 */
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

/* QMP query-cpu-definitions implementation for i386 */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}

/* Ask the current accelerator which bits of feature word @w it supports.
 * Returns ~0 (no filtering) when no recognized accelerator is active.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (hvf_enabled()) {
        r = hvf_get_supported_cpuid(wi->cpuid_eax,
                                    wi->cpuid_ecx,
                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}

/* Warn about every feature bit that was filtered out by the accelerator */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

/* Apply a NULL-terminated table of property/value pairs to @cpu,
 * skipping entries whose value was cleared (set to NULL).
 */
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}

/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Store Cache information from the X86CPUDefinition if available */
    env->cache_info = def->cache_info;
    cpu->legacy_cache = def->cache_info ? 0 : 1;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}

/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Built once, then cached for the lifetime of the process */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* Every named feature flag is also a static property */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}

/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
3469 */ 3470 if (!strcmp(prop->name, "hotplugged")) { 3471 continue; 3472 } 3473 x86_cpu_expand_prop(cpu, props, prop->name); 3474 } 3475 } 3476 3477 static void object_apply_props(Object *obj, QDict *props, Error **errp) 3478 { 3479 const QDictEntry *prop; 3480 Error *err = NULL; 3481 3482 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 3483 object_property_set_qobject(obj, qdict_entry_value(prop), 3484 qdict_entry_key(prop), &err); 3485 if (err) { 3486 break; 3487 } 3488 } 3489 3490 error_propagate(errp, err); 3491 } 3492 3493 /* Create X86CPU object according to model+props specification */ 3494 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 3495 { 3496 X86CPU *xc = NULL; 3497 X86CPUClass *xcc; 3498 Error *err = NULL; 3499 3500 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 3501 if (xcc == NULL) { 3502 error_setg(&err, "CPU model '%s' not found", model); 3503 goto out; 3504 } 3505 3506 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3507 if (props) { 3508 object_apply_props(OBJECT(xc), props, &err); 3509 if (err) { 3510 goto out; 3511 } 3512 } 3513 3514 x86_cpu_expand_features(xc, &err); 3515 if (err) { 3516 goto out; 3517 } 3518 3519 out: 3520 if (err) { 3521 error_propagate(errp, err); 3522 object_unref(OBJECT(xc)); 3523 xc = NULL; 3524 } 3525 return xc; 3526 } 3527 3528 CpuModelExpansionInfo * 3529 arch_query_cpu_model_expansion(CpuModelExpansionType type, 3530 CpuModelInfo *model, 3531 Error **errp) 3532 { 3533 X86CPU *xc = NULL; 3534 Error *err = NULL; 3535 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 3536 QDict *props = NULL; 3537 const char *base_name; 3538 3539 xc = x86_cpu_from_model(model->name, 3540 model->has_props ? 
3541 qobject_to(QDict, model->props) : 3542 NULL, &err); 3543 if (err) { 3544 goto out; 3545 } 3546 3547 props = qdict_new(); 3548 3549 switch (type) { 3550 case CPU_MODEL_EXPANSION_TYPE_STATIC: 3551 /* Static expansion will be based on "base" only */ 3552 base_name = "base"; 3553 x86_cpu_to_dict(xc, props); 3554 break; 3555 case CPU_MODEL_EXPANSION_TYPE_FULL: 3556 /* As we don't return every single property, full expansion needs 3557 * to keep the original model name+props, and add extra 3558 * properties on top of that. 3559 */ 3560 base_name = model->name; 3561 x86_cpu_to_dict_full(xc, props); 3562 break; 3563 default: 3564 error_setg(&err, "Unsupportted expansion type"); 3565 goto out; 3566 } 3567 3568 if (!props) { 3569 props = qdict_new(); 3570 } 3571 x86_cpu_to_dict(xc, props); 3572 3573 ret->model = g_new0(CpuModelInfo, 1); 3574 ret->model->name = g_strdup(base_name); 3575 ret->model->props = QOBJECT(props); 3576 ret->model->has_props = true; 3577 3578 out: 3579 object_unref(OBJECT(xc)); 3580 if (err) { 3581 error_propagate(errp, err); 3582 qapi_free_CpuModelExpansionInfo(ret); 3583 ret = NULL; 3584 } 3585 return ret; 3586 } 3587 3588 static gchar *x86_gdb_arch_name(CPUState *cs) 3589 { 3590 #ifdef TARGET_X86_64 3591 return g_strdup("i386:x86-64"); 3592 #else 3593 return g_strdup("i386"); 3594 #endif 3595 } 3596 3597 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 3598 { 3599 X86CPUDefinition *cpudef = data; 3600 X86CPUClass *xcc = X86_CPU_CLASS(oc); 3601 3602 xcc->cpu_def = cpudef; 3603 xcc->migration_safe = true; 3604 } 3605 3606 static void x86_register_cpudef_type(X86CPUDefinition *def) 3607 { 3608 char *typename = x86_cpu_type_name(def->name); 3609 TypeInfo ti = { 3610 .name = typename, 3611 .parent = TYPE_X86_CPU, 3612 .class_init = x86_cpu_cpudef_class_init, 3613 .class_data = def, 3614 }; 3615 3616 /* AMD aliases are handled at runtime based on CPUID vendor, so 3617 * they shouldn't be set on the CPU model table. 
3618 */ 3619 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 3620 /* catch mistakes instead of silently truncating model_id when too long */ 3621 assert(def->model_id && strlen(def->model_id) <= 48); 3622 3623 3624 type_register(&ti); 3625 g_free(typename); 3626 } 3627 3628 #if !defined(CONFIG_USER_ONLY) 3629 3630 void cpu_clear_apic_feature(CPUX86State *env) 3631 { 3632 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 3633 } 3634 3635 #endif /* !CONFIG_USER_ONLY */ 3636 3637 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 3638 uint32_t *eax, uint32_t *ebx, 3639 uint32_t *ecx, uint32_t *edx) 3640 { 3641 X86CPU *cpu = x86_env_get_cpu(env); 3642 CPUState *cs = CPU(cpu); 3643 uint32_t pkg_offset; 3644 uint32_t limit; 3645 uint32_t signature[3]; 3646 3647 /* Calculate & apply limits for different index ranges */ 3648 if (index >= 0xC0000000) { 3649 limit = env->cpuid_xlevel2; 3650 } else if (index >= 0x80000000) { 3651 limit = env->cpuid_xlevel; 3652 } else if (index >= 0x40000000) { 3653 limit = 0x40000001; 3654 } else { 3655 limit = env->cpuid_level; 3656 } 3657 3658 if (index > limit) { 3659 /* Intel documentation states that invalid EAX input will 3660 * return the same information as EAX=cpuid_level 3661 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 3662 */ 3663 index = env->cpuid_level; 3664 } 3665 3666 switch(index) { 3667 case 0: 3668 *eax = env->cpuid_level; 3669 *ebx = env->cpuid_vendor1; 3670 *edx = env->cpuid_vendor2; 3671 *ecx = env->cpuid_vendor3; 3672 break; 3673 case 1: 3674 *eax = env->cpuid_version; 3675 *ebx = (cpu->apic_id << 24) | 3676 8 << 8; /* CLFLUSH size in quad words, Linux wants it. 
*/ 3677 *ecx = env->features[FEAT_1_ECX]; 3678 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 3679 *ecx |= CPUID_EXT_OSXSAVE; 3680 } 3681 *edx = env->features[FEAT_1_EDX]; 3682 if (cs->nr_cores * cs->nr_threads > 1) { 3683 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 3684 *edx |= CPUID_HT; 3685 } 3686 break; 3687 case 2: 3688 /* cache info: needed for Pentium Pro compatibility */ 3689 if (cpu->cache_info_passthrough) { 3690 host_cpuid(index, 0, eax, ebx, ecx, edx); 3691 break; 3692 } 3693 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 3694 *ebx = 0; 3695 if (!cpu->enable_l3_cache) { 3696 *ecx = 0; 3697 } else { 3698 if (env->cache_info && !cpu->legacy_cache) { 3699 *ecx = cpuid2_cache_descriptor(&env->cache_info->l3_cache); 3700 } else { 3701 *ecx = cpuid2_cache_descriptor(&legacy_l3_cache); 3702 } 3703 } 3704 if (env->cache_info && !cpu->legacy_cache) { 3705 *edx = (cpuid2_cache_descriptor(&env->cache_info->l1d_cache) << 16) | 3706 (cpuid2_cache_descriptor(&env->cache_info->l1i_cache) << 8) | 3707 (cpuid2_cache_descriptor(&env->cache_info->l2_cache)); 3708 } else { 3709 *edx = (cpuid2_cache_descriptor(&legacy_l1d_cache) << 16) | 3710 (cpuid2_cache_descriptor(&legacy_l1i_cache) << 8) | 3711 (cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2)); 3712 } 3713 break; 3714 case 4: 3715 /* cache info: needed for Core compatibility */ 3716 if (cpu->cache_info_passthrough) { 3717 host_cpuid(index, count, eax, ebx, ecx, edx); 3718 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
*/ 3719 *eax &= ~0xFC000000; 3720 if ((*eax & 31) && cs->nr_cores > 1) { 3721 *eax |= (cs->nr_cores - 1) << 26; 3722 } 3723 } else { 3724 *eax = 0; 3725 CPUCacheInfo *l1d, *l1i, *l2, *l3; 3726 if (env->cache_info && !cpu->legacy_cache) { 3727 l1d = &env->cache_info->l1d_cache; 3728 l1i = &env->cache_info->l1i_cache; 3729 l2 = &env->cache_info->l2_cache; 3730 l3 = &env->cache_info->l3_cache; 3731 } else { 3732 l1d = &legacy_l1d_cache; 3733 l1i = &legacy_l1i_cache; 3734 l2 = &legacy_l2_cache; 3735 l3 = &legacy_l3_cache; 3736 } 3737 switch (count) { 3738 case 0: /* L1 dcache info */ 3739 encode_cache_cpuid4(l1d, 1, cs->nr_cores, 3740 eax, ebx, ecx, edx); 3741 break; 3742 case 1: /* L1 icache info */ 3743 encode_cache_cpuid4(l1i, 1, cs->nr_cores, 3744 eax, ebx, ecx, edx); 3745 break; 3746 case 2: /* L2 cache info */ 3747 encode_cache_cpuid4(l2, cs->nr_threads, cs->nr_cores, 3748 eax, ebx, ecx, edx); 3749 break; 3750 case 3: /* L3 cache info */ 3751 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); 3752 if (cpu->enable_l3_cache) { 3753 encode_cache_cpuid4(l3, (1 << pkg_offset), cs->nr_cores, 3754 eax, ebx, ecx, edx); 3755 break; 3756 } 3757 /* fall through */ 3758 default: /* end of info */ 3759 *eax = *ebx = *ecx = *edx = 0; 3760 break; 3761 } 3762 } 3763 break; 3764 case 5: 3765 /* mwait info: needed for Core compatibility */ 3766 *eax = 0; /* Smallest monitor-line size in bytes */ 3767 *ebx = 0; /* Largest monitor-line size in bytes */ 3768 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 3769 *edx = 0; 3770 break; 3771 case 6: 3772 /* Thermal and Power Leaf */ 3773 *eax = env->features[FEAT_6_EAX]; 3774 *ebx = 0; 3775 *ecx = 0; 3776 *edx = 0; 3777 break; 3778 case 7: 3779 /* Structured Extended Feature Flags Enumeration Leaf */ 3780 if (count == 0) { 3781 *eax = 0; /* Maximum ECX value for sub-leaves */ 3782 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 3783 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 3784 if ((*ecx & CPUID_7_0_ECX_PKU) && 
env->cr[4] & CR4_PKE_MASK) { 3785 *ecx |= CPUID_7_0_ECX_OSPKE; 3786 } 3787 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 3788 } else { 3789 *eax = 0; 3790 *ebx = 0; 3791 *ecx = 0; 3792 *edx = 0; 3793 } 3794 break; 3795 case 9: 3796 /* Direct Cache Access Information Leaf */ 3797 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 3798 *ebx = 0; 3799 *ecx = 0; 3800 *edx = 0; 3801 break; 3802 case 0xA: 3803 /* Architectural Performance Monitoring Leaf */ 3804 if (kvm_enabled() && cpu->enable_pmu) { 3805 KVMState *s = cs->kvm_state; 3806 3807 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 3808 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 3809 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 3810 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 3811 } else if (hvf_enabled() && cpu->enable_pmu) { 3812 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 3813 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 3814 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 3815 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 3816 } else { 3817 *eax = 0; 3818 *ebx = 0; 3819 *ecx = 0; 3820 *edx = 0; 3821 } 3822 break; 3823 case 0xB: 3824 /* Extended Topology Enumeration Leaf */ 3825 if (!cpu->enable_cpuid_0xb) { 3826 *eax = *ebx = *ecx = *edx = 0; 3827 break; 3828 } 3829 3830 *ecx = count & 0xff; 3831 *edx = cpu->apic_id; 3832 3833 switch (count) { 3834 case 0: 3835 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads); 3836 *ebx = cs->nr_threads; 3837 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 3838 break; 3839 case 1: 3840 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); 3841 *ebx = cs->nr_cores * cs->nr_threads; 3842 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 3843 break; 3844 default: 3845 *eax = 0; 3846 *ebx = 0; 3847 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 3848 } 3849 3850 assert(!(*eax & ~0x1f)); 3851 *ebx &= 0xffff; /* The count doesn't need to be reliable. 
*/ 3852 break; 3853 case 0xD: { 3854 /* Processor Extended State */ 3855 *eax = 0; 3856 *ebx = 0; 3857 *ecx = 0; 3858 *edx = 0; 3859 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 3860 break; 3861 } 3862 3863 if (count == 0) { 3864 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 3865 *eax = env->features[FEAT_XSAVE_COMP_LO]; 3866 *edx = env->features[FEAT_XSAVE_COMP_HI]; 3867 *ebx = *ecx; 3868 } else if (count == 1) { 3869 *eax = env->features[FEAT_XSAVE]; 3870 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 3871 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 3872 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 3873 *eax = esa->size; 3874 *ebx = esa->offset; 3875 } 3876 } 3877 break; 3878 } 3879 case 0x14: { 3880 /* Intel Processor Trace Enumeration */ 3881 *eax = 0; 3882 *ebx = 0; 3883 *ecx = 0; 3884 *edx = 0; 3885 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 3886 !kvm_enabled()) { 3887 break; 3888 } 3889 3890 if (count == 0) { 3891 *eax = INTEL_PT_MAX_SUBLEAF; 3892 *ebx = INTEL_PT_MINIMAL_EBX; 3893 *ecx = INTEL_PT_MINIMAL_ECX; 3894 } else if (count == 1) { 3895 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 3896 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 3897 } 3898 break; 3899 } 3900 case 0x40000000: 3901 /* 3902 * CPUID code in kvm_arch_init_vcpu() ignores stuff 3903 * set here, but we restrict to TCG none the less. 
3904 */ 3905 if (tcg_enabled() && cpu->expose_tcg) { 3906 memcpy(signature, "TCGTCGTCGTCG", 12); 3907 *eax = 0x40000001; 3908 *ebx = signature[0]; 3909 *ecx = signature[1]; 3910 *edx = signature[2]; 3911 } else { 3912 *eax = 0; 3913 *ebx = 0; 3914 *ecx = 0; 3915 *edx = 0; 3916 } 3917 break; 3918 case 0x40000001: 3919 *eax = 0; 3920 *ebx = 0; 3921 *ecx = 0; 3922 *edx = 0; 3923 break; 3924 case 0x80000000: 3925 *eax = env->cpuid_xlevel; 3926 *ebx = env->cpuid_vendor1; 3927 *edx = env->cpuid_vendor2; 3928 *ecx = env->cpuid_vendor3; 3929 break; 3930 case 0x80000001: 3931 *eax = env->cpuid_version; 3932 *ebx = 0; 3933 *ecx = env->features[FEAT_8000_0001_ECX]; 3934 *edx = env->features[FEAT_8000_0001_EDX]; 3935 3936 /* The Linux kernel checks for the CMPLegacy bit and 3937 * discards multiple thread information if it is set. 3938 * So don't set it here for Intel to make Linux guests happy. 3939 */ 3940 if (cs->nr_cores * cs->nr_threads > 1) { 3941 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 3942 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 3943 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 3944 *ecx |= 1 << 1; /* CmpLegacy bit */ 3945 } 3946 } 3947 break; 3948 case 0x80000002: 3949 case 0x80000003: 3950 case 0x80000004: 3951 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 3952 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 3953 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 3954 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 3955 break; 3956 case 0x80000005: 3957 /* cache info (L1 cache) */ 3958 if (cpu->cache_info_passthrough) { 3959 host_cpuid(index, 0, eax, ebx, ecx, edx); 3960 break; 3961 } 3962 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ 3963 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 3964 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ 3965 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 3966 if (env->cache_info && !cpu->legacy_cache) { 3967 *ecx = 
encode_cache_cpuid80000005(&env->cache_info->l1d_cache); 3968 *edx = encode_cache_cpuid80000005(&env->cache_info->l1i_cache); 3969 } else { 3970 *ecx = encode_cache_cpuid80000005(&legacy_l1d_cache_amd); 3971 *edx = encode_cache_cpuid80000005(&legacy_l1i_cache_amd); 3972 } 3973 break; 3974 case 0x80000006: 3975 /* cache info (L2 cache) */ 3976 if (cpu->cache_info_passthrough) { 3977 host_cpuid(index, 0, eax, ebx, ecx, edx); 3978 break; 3979 } 3980 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ 3981 (L2_DTLB_2M_ENTRIES << 16) | \ 3982 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ 3983 (L2_ITLB_2M_ENTRIES); 3984 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ 3985 (L2_DTLB_4K_ENTRIES << 16) | \ 3986 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ 3987 (L2_ITLB_4K_ENTRIES); 3988 if (env->cache_info && !cpu->legacy_cache) { 3989 encode_cache_cpuid80000006(&env->cache_info->l2_cache, 3990 cpu->enable_l3_cache ? 3991 &env->cache_info->l3_cache : NULL, 3992 ecx, edx); 3993 } else { 3994 encode_cache_cpuid80000006(&legacy_l2_cache_amd, 3995 cpu->enable_l3_cache ? 3996 &legacy_l3_cache : NULL, 3997 ecx, edx); 3998 } 3999 break; 4000 case 0x80000007: 4001 *eax = 0; 4002 *ebx = 0; 4003 *ecx = 0; 4004 *edx = env->features[FEAT_8000_0007_EDX]; 4005 break; 4006 case 0x80000008: 4007 /* virtual & phys address size in low 2 bytes. 
*/ 4008 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 4009 /* 64 bit processor */ 4010 *eax = cpu->phys_bits; /* configurable physical bits */ 4011 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 4012 *eax |= 0x00003900; /* 57 bits virtual */ 4013 } else { 4014 *eax |= 0x00003000; /* 48 bits virtual */ 4015 } 4016 } else { 4017 *eax = cpu->phys_bits; 4018 } 4019 *ebx = env->features[FEAT_8000_0008_EBX]; 4020 *ecx = 0; 4021 *edx = 0; 4022 if (cs->nr_cores * cs->nr_threads > 1) { 4023 *ecx |= (cs->nr_cores * cs->nr_threads) - 1; 4024 } 4025 break; 4026 case 0x8000000A: 4027 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 4028 *eax = 0x00000001; /* SVM Revision */ 4029 *ebx = 0x00000010; /* nr of ASIDs */ 4030 *ecx = 0; 4031 *edx = env->features[FEAT_SVM]; /* optional features */ 4032 } else { 4033 *eax = 0; 4034 *ebx = 0; 4035 *ecx = 0; 4036 *edx = 0; 4037 } 4038 break; 4039 case 0xC0000000: 4040 *eax = env->cpuid_xlevel2; 4041 *ebx = 0; 4042 *ecx = 0; 4043 *edx = 0; 4044 break; 4045 case 0xC0000001: 4046 /* Support for VIA CPU's CPUID instruction */ 4047 *eax = env->cpuid_version; 4048 *ebx = 0; 4049 *ecx = 0; 4050 *edx = env->features[FEAT_C000_0001_EDX]; 4051 break; 4052 case 0xC0000002: 4053 case 0xC0000003: 4054 case 0xC0000004: 4055 /* Reserved for the future, and now filled with zero */ 4056 *eax = 0; 4057 *ebx = 0; 4058 *ecx = 0; 4059 *edx = 0; 4060 break; 4061 case 0x8000001F: 4062 *eax = sev_enabled() ? 
0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset()
 *
 * Puts the vCPU into the architectural power-on/RESET state: real mode at
 * CS:IP = F000:FFF0, FPU/SSE/XSAVE state re-initialized, MTRRs and debug
 * registers cleared.  Only the portion of CPUX86State up to
 * end_reset_fields is zeroed, so configuration (CPUID data, features)
 * survives reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero only the volatile run-state; fields after end_reset_fields
     * (feature words, CPUID config) are preserved across reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: ET/CD/NW set, paging and protection off. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Architectural reset: CS base 0xffff0000 so execution starts at the
     * reset vector 0xfffffff0 while CS.selector is 0xf000. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all tag bits set = registers empty. */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI from the BSP. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    } else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* Returns true if this vCPU is the bootstrap processor, as reported by
 * the BSP bit in its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Initialize machine-check (MCE) capability MSRs, but only for family >= 6
 * CPUs that advertise both MCE and MCA in CPUID[1].EDX. */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation matching the current accelerator. */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create the local APIC child object for this CPU and wire it up. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds the reference; drop ours. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to
link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize the CPU's APIC device and, once per machine, map its MMIO
 * window into the system address space. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        /* All CPUs share the same APIC base page, so map it only once. */
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

/* machine-init-done notifier: alias /machine/smram (if present) into this
 * CPU's SMM address space with higher priority than normal memory. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to value if it is currently lower (monotonic max). */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;

    /* A feature word with no bits set needs no CPUID leaf. */
    if (!env->features[w]) {
        return;
    }

    /* The top nibble of the leaf number selects which level register
     * (basic / extended / Centaur) must cover it. */
    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    /* Component i is enabled iff its controlling feature bit is set. */
    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU
model definition (X86CPUDefinition). This is 4401 * implemented by x86_cpu_load_def() and should be completely 4402 * transparent, as it is done automatically by instance_init. 4403 * No code should need to look at X86CPUDefinition structs 4404 * outside instance_init. 4405 * 4406 * 2) CPU expansion. This is done by realize before CPUID 4407 * filtering, and will make sure host/accelerator data is 4408 * loaded for CPU models that depend on host capabilities 4409 * (e.g. "host"). Done by x86_cpu_expand_features(). 4410 * 4411 * 3) CPUID filtering. This initializes extra data related to 4412 * CPUID, and checks if the host supports all capabilities 4413 * required by the CPU. Runnability of a CPU model is 4414 * determined at this step. Done by x86_cpu_filter_features(). 4415 * 4416 * Some operations don't require all steps to be performed. 4417 * More precisely: 4418 * 4419 * - CPU instance creation (instance_init) will run only CPU 4420 * model loading. CPU expansion can't run at instance_init-time 4421 * because host/accelerator data may be not available yet. 4422 * - CPU realization will perform both CPU model expansion and CPUID 4423 * filtering, and return an error in case one of them fails. 4424 * - query-cpu-definitions needs to run all 3 steps. It needs 4425 * to run CPUID filtering, as the 'unavailable-features' 4426 * field is set based on the filtering results. 4427 * - The query-cpu-model-expansion QMP command only needs to run 4428 * CPU model loading and CPU expansion. It should not filter 4429 * any CPUID data based on host capabilities. 4430 */ 4431 4432 /* Expand CPU configuration data, based on configured features 4433 * and host/accelerator capabilities when appropriate. 
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply legacy +feature/-feature command-line lists as QOM
     * property writes; the first failure aborts expansion. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features only make sense under KVM with expose-kvm. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Mask every feature word down to what the accelerator supports;
     * remember the dropped bits in filtered_features for reporting. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT needs a deeper check than a single feature bit: the host
     * must support all of leaf 0x14's fixed capabilities we advertise. */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: expands and filters features, fixes up
 * phys-bits, creates the APIC, and starts the vCPU. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
 */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                           " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        /* TCG needs a second address space so SMM code can see SMRAM. */
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram.
         */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* DeviceClass::unrealize: stop the vCPU and tear down per-CPU state. */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* Maps a QOM boolean property onto one or more bits of a feature word. */
typedef struct BitProperty {
    FeatureWord w;     /* which feature word the property controls */
    uint32_t mask;     /* bit(s) within that word */
} BitProperty;

/* QOM getter: true only if ALL bits in the property's mask are set. */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: set/clear the feature bit(s) and record that the user
 * touched them, so auto-enabling won't override the explicit choice. */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    /* Feature bits are fixed after realize. */
    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

/* QOM release hook: frees the BitProperty allocated at registration. */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: widen the existing property's mask. */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

/* Register the QOM property for one named feature bit (no-op for
 * unnamed/reserved bits). */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

/* Build Hyper-V crash information from the crash-parameter MSRs; returns
 * NULL (no allocation) unless the guest has the crash MSRs available. */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 =
env->msr_hv_crash_params[3]; 4914 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 4915 } 4916 4917 return panic_info; 4918 } 4919 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 4920 const char *name, void *opaque, 4921 Error **errp) 4922 { 4923 CPUState *cs = CPU(obj); 4924 GuestPanicInformation *panic_info; 4925 4926 if (!cs->crash_occurred) { 4927 error_setg(errp, "No crash occured"); 4928 return; 4929 } 4930 4931 panic_info = x86_cpu_get_crash_info(cs); 4932 if (panic_info == NULL) { 4933 error_setg(errp, "No crash information"); 4934 return; 4935 } 4936 4937 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 4938 errp); 4939 qapi_free_GuestPanicInformation(panic_info); 4940 } 4941 4942 static void x86_cpu_initfn(Object *obj) 4943 { 4944 CPUState *cs = CPU(obj); 4945 X86CPU *cpu = X86_CPU(obj); 4946 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 4947 CPUX86State *env = &cpu->env; 4948 FeatureWord w; 4949 4950 cs->env_ptr = env; 4951 4952 object_property_add(obj, "family", "int", 4953 x86_cpuid_version_get_family, 4954 x86_cpuid_version_set_family, NULL, NULL, NULL); 4955 object_property_add(obj, "model", "int", 4956 x86_cpuid_version_get_model, 4957 x86_cpuid_version_set_model, NULL, NULL, NULL); 4958 object_property_add(obj, "stepping", "int", 4959 x86_cpuid_version_get_stepping, 4960 x86_cpuid_version_set_stepping, NULL, NULL, NULL); 4961 object_property_add_str(obj, "vendor", 4962 x86_cpuid_get_vendor, 4963 x86_cpuid_set_vendor, NULL); 4964 object_property_add_str(obj, "model-id", 4965 x86_cpuid_get_model_id, 4966 x86_cpuid_set_model_id, NULL); 4967 object_property_add(obj, "tsc-frequency", "int", 4968 x86_cpuid_get_tsc_freq, 4969 x86_cpuid_set_tsc_freq, NULL, NULL, NULL); 4970 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 4971 x86_cpu_get_feature_words, 4972 NULL, NULL, (void *)env->features, NULL); 4973 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 4974 
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* Register one boolean QOM property per named feature bit. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Compatibility aliases for historical feature-name spellings. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Underscore variants kept for old command lines. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time",
                              &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}

/* CPUClass::get_arch_id: the APIC ID is the architectural CPU identifier. */
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

/* CPUClass::get_paging_enabled: reports CR0.PG. */
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

/* CPUClass::set_pc: value is a linear address stored into EIP. */
static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

/* CPUClass::synchronize_from_tb: recover EIP from the TB's pc, which is
 * CS base + EIP. */
static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

/* CPUClass::has_work: true if a deliverable interrupt-type event is
 * pending (hard IRQs gated on EFLAGS.IF, SMIs gated on not-in-SMM). */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}

static void
x86_disas_set_info(CPUState *cs, disassemble_info *info) 5070 { 5071 X86CPU *cpu = X86_CPU(cs); 5072 CPUX86State *env = &cpu->env; 5073 5074 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 5075 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 5076 : bfd_mach_i386_i8086); 5077 info->print_insn = print_insn_i386; 5078 5079 info->cap_arch = CS_ARCH_X86; 5080 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 5081 : env->hflags & HF_CS32_MASK ? CS_MODE_32 5082 : CS_MODE_16); 5083 info->cap_insn_unit = 1; 5084 info->cap_insn_split = 8; 5085 } 5086 5087 void x86_update_hflags(CPUX86State *env) 5088 { 5089 uint32_t hflags; 5090 #define HFLAG_COPY_MASK \ 5091 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 5092 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 5093 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 5094 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 5095 5096 hflags = env->hflags & HFLAG_COPY_MASK; 5097 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 5098 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 5099 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 5100 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 5101 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 5102 5103 if (env->cr[4] & CR4_OSFXSR_MASK) { 5104 hflags |= HF_OSFXSR_MASK; 5105 } 5106 5107 if (env->efer & MSR_EFER_LMA) { 5108 hflags |= HF_LMA_MASK; 5109 } 5110 5111 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 5112 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 5113 } else { 5114 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 5115 (DESC_B_SHIFT - HF_CS32_SHIFT); 5116 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 5117 (DESC_B_SHIFT - HF_SS32_SHIFT); 5118 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 5119 !(hflags & HF_CS32_MASK)) { 5120 hflags |= HF_ADDSEG_MASK; 5121 } else { 5122 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 5123 
env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 5124 } 5125 } 5126 env->hflags = hflags; 5127 } 5128 5129 static Property x86_cpu_properties[] = { 5130 #ifdef CONFIG_USER_ONLY 5131 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 5132 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 5133 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 5134 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 5135 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 5136 #else 5137 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 5138 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 5139 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 5140 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 5141 #endif 5142 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 5143 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 5144 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks }, 5145 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false), 5146 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false), 5147 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false), 5148 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false), 5149 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false), 5150 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false), 5151 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false), 5152 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false), 5153 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false), 5154 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false), 5155 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false), 5156 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 5157 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 5158 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 5159 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 5160 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, 
false), 5161 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 5162 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 5163 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 5164 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 5165 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 5166 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 5167 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 5168 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 5169 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 5170 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 5171 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 5172 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 5173 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 5174 false), 5175 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 5176 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 5177 /* 5178 * lecacy_cache defaults to CPU model being chosen. This is set in 5179 * x86_cpu_load_def based on cache_info which is initialized in 5180 * builtin_x86_defs 5181 */ 5182 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, false), 5183 5184 /* 5185 * From "Requirements for Implementing the Microsoft 5186 * Hypervisor Interface": 5187 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 5188 * 5189 * "Starting with Windows Server 2012 and Windows 8, if 5190 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 5191 * the hypervisor imposes no specific limit to the number of VPs. 5192 * In this case, Windows Server 2012 guest VMs may use more than 5193 * 64 VPs, up to the maximum supported number of processors applicable 5194 * to the specific Windows version being used." 
5195 */ 5196 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 5197 DEFINE_PROP_END_OF_LIST() 5198 }; 5199 5200 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 5201 { 5202 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5203 CPUClass *cc = CPU_CLASS(oc); 5204 DeviceClass *dc = DEVICE_CLASS(oc); 5205 5206 device_class_set_parent_realize(dc, x86_cpu_realizefn, 5207 &xcc->parent_realize); 5208 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 5209 &xcc->parent_unrealize); 5210 dc->props = x86_cpu_properties; 5211 5212 xcc->parent_reset = cc->reset; 5213 cc->reset = x86_cpu_reset; 5214 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 5215 5216 cc->class_by_name = x86_cpu_class_by_name; 5217 cc->parse_features = x86_cpu_parse_featurestr; 5218 cc->has_work = x86_cpu_has_work; 5219 #ifdef CONFIG_TCG 5220 cc->do_interrupt = x86_cpu_do_interrupt; 5221 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 5222 #endif 5223 cc->dump_state = x86_cpu_dump_state; 5224 cc->get_crash_info = x86_cpu_get_crash_info; 5225 cc->set_pc = x86_cpu_set_pc; 5226 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 5227 cc->gdb_read_register = x86_cpu_gdb_read_register; 5228 cc->gdb_write_register = x86_cpu_gdb_write_register; 5229 cc->get_arch_id = x86_cpu_get_arch_id; 5230 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 5231 #ifdef CONFIG_USER_ONLY 5232 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault; 5233 #else 5234 cc->asidx_from_attrs = x86_asidx_from_attrs; 5235 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 5236 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; 5237 cc->write_elf64_note = x86_cpu_write_elf64_note; 5238 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 5239 cc->write_elf32_note = x86_cpu_write_elf32_note; 5240 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 5241 cc->vmsd = &vmstate_x86_cpu; 5242 #endif 5243 cc->gdb_arch_name = x86_gdb_arch_name; 5244 #ifdef TARGET_X86_64 5245 cc->gdb_core_xml_file = 
"i386-64bit.xml"; 5246 cc->gdb_num_core_regs = 57; 5247 #else 5248 cc->gdb_core_xml_file = "i386-32bit.xml"; 5249 cc->gdb_num_core_regs = 41; 5250 #endif 5251 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 5252 cc->debug_excp_handler = breakpoint_handler; 5253 #endif 5254 cc->cpu_exec_enter = x86_cpu_exec_enter; 5255 cc->cpu_exec_exit = x86_cpu_exec_exit; 5256 #ifdef CONFIG_TCG 5257 cc->tcg_initialize = tcg_x86_init; 5258 #endif 5259 cc->disas_set_info = x86_disas_set_info; 5260 5261 dc->user_creatable = true; 5262 } 5263 5264 static const TypeInfo x86_cpu_type_info = { 5265 .name = TYPE_X86_CPU, 5266 .parent = TYPE_CPU, 5267 .instance_size = sizeof(X86CPU), 5268 .instance_init = x86_cpu_initfn, 5269 .abstract = true, 5270 .class_size = sizeof(X86CPUClass), 5271 .class_init = x86_cpu_common_class_init, 5272 }; 5273 5274 5275 /* "base" CPU model, used by query-cpu-model-expansion */ 5276 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 5277 { 5278 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5279 5280 xcc->static_model = true; 5281 xcc->migration_safe = true; 5282 xcc->model_description = "base CPU model type with no features enabled"; 5283 xcc->ordering = 8; 5284 } 5285 5286 static const TypeInfo x86_base_cpu_type_info = { 5287 .name = X86_CPU_TYPE_NAME("base"), 5288 .parent = TYPE_X86_CPU, 5289 .class_init = x86_cpu_base_class_init, 5290 }; 5291 5292 static void x86_cpu_register_types(void) 5293 { 5294 int i; 5295 5296 type_register_static(&x86_cpu_type_info); 5297 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 5298 x86_register_cpudef_type(&builtin_x86_defs[i]); 5299 } 5300 type_register_static(&max_x86_cpu_type_info); 5301 type_register_static(&x86_base_cpu_type_info); 5302 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 5303 type_register_static(&host_x86_cpu_type_info); 5304 #endif 5305 } 5306 5307 type_init(x86_cpu_register_types) 5308