1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/hvf.h" 30 #include "sysemu/cpus.h" 31 #include "kvm_i386.h" 32 #include "sev_i386.h" 33 34 #include "qemu/error-report.h" 35 #include "qemu/module.h" 36 #include "qemu/option.h" 37 #include "qemu/config-file.h" 38 #include "qapi/error.h" 39 #include "qapi/qapi-visit-machine.h" 40 #include "qapi/qapi-visit-run-state.h" 41 #include "qapi/qmp/qdict.h" 42 #include "qapi/qmp/qerror.h" 43 #include "qapi/visitor.h" 44 #include "qom/qom-qobject.h" 45 #include "sysemu/arch_init.h" 46 #include "qapi/qapi-commands-machine-target.h" 47 48 #include "standard-headers/asm-x86/kvm_para.h" 49 50 #include "sysemu/sysemu.h" 51 #include "sysemu/tcg.h" 52 #include "hw/qdev-properties.h" 53 #include "hw/i386/topology.h" 54 #ifndef CONFIG_USER_ONLY 55 #include "exec/address-spaces.h" 56 #include "hw/hw.h" 57 #include "hw/xen/xen.h" 58 #include "hw/i386/apic_internal.h" 59 #endif 60 61 #include "disas/capstone.h" 62 63 /* Helpers for building CPUID[2] descriptors: */ 64 65 struct 
CPUID2CacheDescriptorInfo { 66 enum CacheType type; 67 int level; 68 int size; 69 int line_size; 70 int associativity; 71 }; 72 73 /* 74 * Known CPUID 2 cache descriptors. 75 * From Intel SDM Volume 2A, CPUID instruction 76 */ 77 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 78 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 79 .associativity = 4, .line_size = 32, }, 80 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 81 .associativity = 4, .line_size = 32, }, 82 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 83 .associativity = 4, .line_size = 64, }, 84 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 85 .associativity = 2, .line_size = 32, }, 86 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 87 .associativity = 4, .line_size = 32, }, 88 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 89 .associativity = 4, .line_size = 64, }, 90 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 91 .associativity = 6, .line_size = 64, }, 92 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 93 .associativity = 2, .line_size = 64, }, 94 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 95 .associativity = 8, .line_size = 64, }, 96 /* lines per sector is not supported cpuid2_cache_descriptor(), 97 * so descriptors 0x22, 0x23 are not included 98 */ 99 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 100 .associativity = 16, .line_size = 64, }, 101 /* lines per sector is not supported cpuid2_cache_descriptor(), 102 * so descriptors 0x25, 0x20 are not included 103 */ 104 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 105 .associativity = 8, .line_size = 64, }, 106 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 107 .associativity = 8, .line_size = 64, }, 108 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 109 .associativity = 4, .line_size = 32, }, 110 [0x42] = { .level 
= 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 111 .associativity = 4, .line_size = 32, }, 112 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 113 .associativity = 4, .line_size = 32, }, 114 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 115 .associativity = 4, .line_size = 32, }, 116 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 117 .associativity = 4, .line_size = 32, }, 118 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 119 .associativity = 4, .line_size = 64, }, 120 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 121 .associativity = 8, .line_size = 64, }, 122 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 123 .associativity = 12, .line_size = 64, }, 124 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 125 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 126 .associativity = 12, .line_size = 64, }, 127 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 128 .associativity = 16, .line_size = 64, }, 129 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 130 .associativity = 12, .line_size = 64, }, 131 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 132 .associativity = 16, .line_size = 64, }, 133 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 134 .associativity = 24, .line_size = 64, }, 135 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 136 .associativity = 8, .line_size = 64, }, 137 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 138 .associativity = 4, .line_size = 64, }, 139 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 140 .associativity = 4, .line_size = 64, }, 141 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 142 .associativity = 4, .line_size = 64, }, 143 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 144 .associativity = 4, .line_size = 64, }, 145 /* lines per sector is not supported 
cpuid2_cache_descriptor(), 146 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 147 */ 148 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 149 .associativity = 8, .line_size = 64, }, 150 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 151 .associativity = 2, .line_size = 64, }, 152 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 153 .associativity = 8, .line_size = 64, }, 154 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 155 .associativity = 8, .line_size = 32, }, 156 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 157 .associativity = 8, .line_size = 32, }, 158 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 159 .associativity = 8, .line_size = 32, }, 160 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 161 .associativity = 8, .line_size = 32, }, 162 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 163 .associativity = 4, .line_size = 64, }, 164 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 165 .associativity = 8, .line_size = 64, }, 166 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 167 .associativity = 4, .line_size = 64, }, 168 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 169 .associativity = 4, .line_size = 64, }, 170 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 171 .associativity = 4, .line_size = 64, }, 172 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 173 .associativity = 8, .line_size = 64, }, 174 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 175 .associativity = 8, .line_size = 64, }, 176 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 177 .associativity = 8, .line_size = 64, }, 178 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 179 .associativity = 12, .line_size = 64, }, 180 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 181 .associativity = 12, .line_size = 64, }, 
182 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 183 .associativity = 12, .line_size = 64, }, 184 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 185 .associativity = 16, .line_size = 64, }, 186 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 187 .associativity = 16, .line_size = 64, }, 188 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 189 .associativity = 16, .line_size = 64, }, 190 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 191 .associativity = 24, .line_size = 64, }, 192 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 193 .associativity = 24, .line_size = 64, }, 194 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 195 .associativity = 24, .line_size = 64, }, 196 }; 197 198 /* 199 * "CPUID leaf 2 does not report cache descriptor information, 200 * use CPUID leaf 4 to query cache parameters" 201 */ 202 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 203 204 /* 205 * Return a CPUID 2 cache descriptor for a given cache. 
206 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 207 */ 208 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 209 { 210 int i; 211 212 assert(cache->size > 0); 213 assert(cache->level > 0); 214 assert(cache->line_size > 0); 215 assert(cache->associativity > 0); 216 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 217 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 218 if (d->level == cache->level && d->type == cache->type && 219 d->size == cache->size && d->line_size == cache->line_size && 220 d->associativity == cache->associativity) { 221 return i; 222 } 223 } 224 225 return CACHE_DESCRIPTOR_UNAVAILABLE; 226 } 227 228 /* CPUID Leaf 4 constants: */ 229 230 /* EAX: */ 231 #define CACHE_TYPE_D 1 232 #define CACHE_TYPE_I 2 233 #define CACHE_TYPE_UNIFIED 3 234 235 #define CACHE_LEVEL(l) (l << 5) 236 237 #define CACHE_SELF_INIT_LEVEL (1 << 8) 238 239 /* EDX: */ 240 #define CACHE_NO_INVD_SHARING (1 << 0) 241 #define CACHE_INCLUSIVE (1 << 1) 242 #define CACHE_COMPLEX_IDX (1 << 2) 243 244 /* Encode CacheType for CPUID[4].EAX */ 245 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 246 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 247 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 248 0 /* Invalid value */) 249 250 251 /* Encode cache info for CPUID[4] */ 252 static void encode_cache_cpuid4(CPUCacheInfo *cache, 253 int num_apic_ids, int num_cores, 254 uint32_t *eax, uint32_t *ebx, 255 uint32_t *ecx, uint32_t *edx) 256 { 257 assert(cache->size == cache->line_size * cache->associativity * 258 cache->partitions * cache->sets); 259 260 assert(num_apic_ids > 0); 261 *eax = CACHE_TYPE(cache->type) | 262 CACHE_LEVEL(cache->level) | 263 (cache->self_init ? 
CACHE_SELF_INIT_LEVEL : 0) | 264 ((num_cores - 1) << 26) | 265 ((num_apic_ids - 1) << 14); 266 267 assert(cache->line_size > 0); 268 assert(cache->partitions > 0); 269 assert(cache->associativity > 0); 270 /* We don't implement fully-associative caches */ 271 assert(cache->associativity < cache->sets); 272 *ebx = (cache->line_size - 1) | 273 ((cache->partitions - 1) << 12) | 274 ((cache->associativity - 1) << 22); 275 276 assert(cache->sets > 0); 277 *ecx = cache->sets - 1; 278 279 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 280 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 281 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 282 } 283 284 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 285 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 286 { 287 assert(cache->size % 1024 == 0); 288 assert(cache->lines_per_tag > 0); 289 assert(cache->associativity > 0); 290 assert(cache->line_size > 0); 291 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 292 (cache->lines_per_tag << 8) | (cache->line_size); 293 } 294 295 #define ASSOC_FULL 0xFF 296 297 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 298 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 299 a == 2 ? 0x2 : \ 300 a == 4 ? 0x4 : \ 301 a == 8 ? 0x6 : \ 302 a == 16 ? 0x8 : \ 303 a == 32 ? 0xA : \ 304 a == 48 ? 0xB : \ 305 a == 64 ? 0xC : \ 306 a == 96 ? 0xD : \ 307 a == 128 ? 0xE : \ 308 a == ASSOC_FULL ? 0xF : \ 309 0 /* invalid value */) 310 311 /* 312 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 313 * @l3 can be NULL. 
314 */ 315 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 316 CPUCacheInfo *l3, 317 uint32_t *ecx, uint32_t *edx) 318 { 319 assert(l2->size % 1024 == 0); 320 assert(l2->associativity > 0); 321 assert(l2->lines_per_tag > 0); 322 assert(l2->line_size > 0); 323 *ecx = ((l2->size / 1024) << 16) | 324 (AMD_ENC_ASSOC(l2->associativity) << 12) | 325 (l2->lines_per_tag << 8) | (l2->line_size); 326 327 if (l3) { 328 assert(l3->size % (512 * 1024) == 0); 329 assert(l3->associativity > 0); 330 assert(l3->lines_per_tag > 0); 331 assert(l3->line_size > 0); 332 *edx = ((l3->size / (512 * 1024)) << 18) | 333 (AMD_ENC_ASSOC(l3->associativity) << 12) | 334 (l3->lines_per_tag << 8) | (l3->line_size); 335 } else { 336 *edx = 0; 337 } 338 } 339 340 /* 341 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 342 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 343 * Define the constants to build the cpu topology. Right now, TOPOEXT 344 * feature is enabled only on EPYC. So, these constants are based on 345 * EPYC supported configurations. We may need to handle the cases if 346 * these values change in future. 347 */ 348 /* Maximum core complexes in a node */ 349 #define MAX_CCX 2 350 /* Maximum cores in a core complex */ 351 #define MAX_CORES_IN_CCX 4 352 /* Maximum cores in a node */ 353 #define MAX_CORES_IN_NODE 8 354 /* Maximum nodes in a socket */ 355 #define MAX_NODES_PER_SOCKET 4 356 357 /* 358 * Figure out the number of nodes required to build this config. 359 * Max cores in a node is 8 360 */ 361 static int nodes_in_socket(int nr_cores) 362 { 363 int nodes; 364 365 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); 366 367 /* Hardware does not support config with 3 nodes, return 4 in that case */ 368 return (nodes == 3) ? 
4 : nodes; 369 } 370 371 /* 372 * Decide the number of cores in a core complex with the given nr_cores using 373 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and 374 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible 375 * L3 cache is shared across all cores in a core complex. So, this will also 376 * tell us how many cores are sharing the L3 cache. 377 */ 378 static int cores_in_core_complex(int nr_cores) 379 { 380 int nodes; 381 382 /* Check if we can fit all the cores in one core complex */ 383 if (nr_cores <= MAX_CORES_IN_CCX) { 384 return nr_cores; 385 } 386 /* Get the number of nodes required to build this config */ 387 nodes = nodes_in_socket(nr_cores); 388 389 /* 390 * Divide the cores accros all the core complexes 391 * Return rounded up value 392 */ 393 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); 394 } 395 396 /* Encode cache info for CPUID[8000001D] */ 397 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs, 398 uint32_t *eax, uint32_t *ebx, 399 uint32_t *ecx, uint32_t *edx) 400 { 401 uint32_t l3_cores; 402 assert(cache->size == cache->line_size * cache->associativity * 403 cache->partitions * cache->sets); 404 405 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 406 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 407 408 /* L3 is shared among multiple cores */ 409 if (cache->level == 3) { 410 l3_cores = cores_in_core_complex(cs->nr_cores); 411 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14; 412 } else { 413 *eax |= ((cs->nr_threads - 1) << 14); 414 } 415 416 assert(cache->line_size > 0); 417 assert(cache->partitions > 0); 418 assert(cache->associativity > 0); 419 /* We don't implement fully-associative caches */ 420 assert(cache->associativity < cache->sets); 421 *ebx = (cache->line_size - 1) | 422 ((cache->partitions - 1) << 12) | 423 ((cache->associativity - 1) << 22); 424 425 assert(cache->sets > 0); 426 *ecx = cache->sets - 1; 427 428 *edx = (cache->no_invd_sharing ? 
CACHE_NO_INVD_SHARING : 0) | 429 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 430 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 431 } 432 433 /* Data structure to hold the configuration info for a given core index */ 434 struct core_topology { 435 /* core complex id of the current core index */ 436 int ccx_id; 437 /* 438 * Adjusted core index for this core in the topology 439 * This can be 0,1,2,3 with max 4 cores in a core complex 440 */ 441 int core_id; 442 /* Node id for this core index */ 443 int node_id; 444 /* Number of nodes in this config */ 445 int num_nodes; 446 }; 447 448 /* 449 * Build the configuration closely match the EPYC hardware. Using the EPYC 450 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE) 451 * right now. This could change in future. 452 * nr_cores : Total number of cores in the config 453 * core_id : Core index of the current CPU 454 * topo : Data structure to hold all the config info for this core index 455 */ 456 static void build_core_topology(int nr_cores, int core_id, 457 struct core_topology *topo) 458 { 459 int nodes, cores_in_ccx; 460 461 /* First get the number of nodes required */ 462 nodes = nodes_in_socket(nr_cores); 463 464 cores_in_ccx = cores_in_core_complex(nr_cores); 465 466 topo->node_id = core_id / (cores_in_ccx * MAX_CCX); 467 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx; 468 topo->core_id = core_id % cores_in_ccx; 469 topo->num_nodes = nodes; 470 } 471 472 /* Encode cache info for CPUID[8000001E] */ 473 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu, 474 uint32_t *eax, uint32_t *ebx, 475 uint32_t *ecx, uint32_t *edx) 476 { 477 struct core_topology topo = {0}; 478 unsigned long nodes; 479 int shift; 480 481 build_core_topology(cs->nr_cores, cpu->core_id, &topo); 482 *eax = cpu->apic_id; 483 /* 484 * CPUID_Fn8000001E_EBX 485 * 31:16 Reserved 486 * 15:8 Threads per core (The number of threads per core is 487 * Threads per core + 1) 488 * 7:0 Core id 
(see bit decoding below) 489 * SMT: 490 * 4:3 node id 491 * 2 Core complex id 492 * 1:0 Core id 493 * Non SMT: 494 * 5:4 node id 495 * 3 Core complex id 496 * 1:0 Core id 497 */ 498 if (cs->nr_threads - 1) { 499 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) | 500 (topo.ccx_id << 2) | topo.core_id; 501 } else { 502 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id; 503 } 504 /* 505 * CPUID_Fn8000001E_ECX 506 * 31:11 Reserved 507 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 508 * 7:0 Node id (see bit decoding below) 509 * 2 Socket id 510 * 1:0 Node id 511 */ 512 if (topo.num_nodes <= 4) { 513 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | 514 topo.node_id; 515 } else { 516 /* 517 * Node id fix up. Actual hardware supports up to 4 nodes. But with 518 * more than 32 cores, we may end up with more than 4 nodes. 519 * Node id is a combination of socket id and node id. Only requirement 520 * here is that this number should be unique accross the system. 521 * Shift the socket id to accommodate more nodes. We dont expect both 522 * socket id and node id to be big number at the same time. This is not 523 * an ideal config but we need to to support it. Max nodes we can have 524 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 525 * 5 bits for nodes. Find the left most set bit to represent the total 526 * number of nodes. find_last_bit returns last set bit(0 based). Left 527 * shift(+1) the socket id to represent all the nodes. 528 */ 529 nodes = topo.num_nodes - 1; 530 shift = find_last_bit(&nodes, 8); 531 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) | 532 topo.node_id; 533 } 534 *edx = 0; 535 } 536 537 /* 538 * Definitions of the hardcoded cache entries we expose: 539 * These are legacy cache values. 
If there is a need to change any 540 * of these values please use builtin_x86_defs 541 */ 542 543 /* L1 data cache: */ 544 static CPUCacheInfo legacy_l1d_cache = { 545 .type = DATA_CACHE, 546 .level = 1, 547 .size = 32 * KiB, 548 .self_init = 1, 549 .line_size = 64, 550 .associativity = 8, 551 .sets = 64, 552 .partitions = 1, 553 .no_invd_sharing = true, 554 }; 555 556 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 557 static CPUCacheInfo legacy_l1d_cache_amd = { 558 .type = DATA_CACHE, 559 .level = 1, 560 .size = 64 * KiB, 561 .self_init = 1, 562 .line_size = 64, 563 .associativity = 2, 564 .sets = 512, 565 .partitions = 1, 566 .lines_per_tag = 1, 567 .no_invd_sharing = true, 568 }; 569 570 /* L1 instruction cache: */ 571 static CPUCacheInfo legacy_l1i_cache = { 572 .type = INSTRUCTION_CACHE, 573 .level = 1, 574 .size = 32 * KiB, 575 .self_init = 1, 576 .line_size = 64, 577 .associativity = 8, 578 .sets = 64, 579 .partitions = 1, 580 .no_invd_sharing = true, 581 }; 582 583 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 584 static CPUCacheInfo legacy_l1i_cache_amd = { 585 .type = INSTRUCTION_CACHE, 586 .level = 1, 587 .size = 64 * KiB, 588 .self_init = 1, 589 .line_size = 64, 590 .associativity = 2, 591 .sets = 512, 592 .partitions = 1, 593 .lines_per_tag = 1, 594 .no_invd_sharing = true, 595 }; 596 597 /* Level 2 unified cache: */ 598 static CPUCacheInfo legacy_l2_cache = { 599 .type = UNIFIED_CACHE, 600 .level = 2, 601 .size = 4 * MiB, 602 .self_init = 1, 603 .line_size = 64, 604 .associativity = 16, 605 .sets = 4096, 606 .partitions = 1, 607 .no_invd_sharing = true, 608 }; 609 610 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 611 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 612 .type = UNIFIED_CACHE, 613 .level = 2, 614 .size = 2 * MiB, 615 .line_size = 64, 616 .associativity = 8, 617 }; 618 619 620 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 621 static CPUCacheInfo 
legacy_l2_cache_amd = { 622 .type = UNIFIED_CACHE, 623 .level = 2, 624 .size = 512 * KiB, 625 .line_size = 64, 626 .lines_per_tag = 1, 627 .associativity = 16, 628 .sets = 512, 629 .partitions = 1, 630 }; 631 632 /* Level 3 unified cache: */ 633 static CPUCacheInfo legacy_l3_cache = { 634 .type = UNIFIED_CACHE, 635 .level = 3, 636 .size = 16 * MiB, 637 .line_size = 64, 638 .associativity = 16, 639 .sets = 16384, 640 .partitions = 1, 641 .lines_per_tag = 1, 642 .self_init = true, 643 .inclusive = true, 644 .complex_indexing = true, 645 }; 646 647 /* TLB definitions: */ 648 649 #define L1_DTLB_2M_ASSOC 1 650 #define L1_DTLB_2M_ENTRIES 255 651 #define L1_DTLB_4K_ASSOC 1 652 #define L1_DTLB_4K_ENTRIES 255 653 654 #define L1_ITLB_2M_ASSOC 1 655 #define L1_ITLB_2M_ENTRIES 255 656 #define L1_ITLB_4K_ASSOC 1 657 #define L1_ITLB_4K_ENTRIES 255 658 659 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 660 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 661 #define L2_DTLB_4K_ASSOC 4 662 #define L2_DTLB_4K_ENTRIES 512 663 664 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 665 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 666 #define L2_ITLB_4K_ASSOC 4 667 #define L2_ITLB_4K_ENTRIES 512 668 669 /* CPUID Leaf 0x14 constants: */ 670 #define INTEL_PT_MAX_SUBLEAF 0x1 671 /* 672 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 673 * MSR can be accessed; 674 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 675 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 676 * of Intel PT MSRs across warm reset; 677 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 678 */ 679 #define INTEL_PT_MINIMAL_EBX 0xf 680 /* 681 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 682 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 683 * accessed; 684 * bit[01]: ToPA tables can hold any number of output entries, up to the 685 * maximum allowed by the MaskOrTableOffset field of 686 * 
IA32_RTIT_OUTPUT_MASK_PTRS; 687 * bit[02]: Support Single-Range Output scheme; 688 */ 689 #define INTEL_PT_MINIMAL_ECX 0x7 690 /* generated packets which contain IP payloads have LIP values */ 691 #define INTEL_PT_IP_LIP (1 << 31) 692 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 693 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 694 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 695 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 696 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 697 698 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 699 uint32_t vendor2, uint32_t vendor3) 700 { 701 int i; 702 for (i = 0; i < 4; i++) { 703 dst[i] = vendor1 >> (8 * i); 704 dst[i + 4] = vendor2 >> (8 * i); 705 dst[i + 8] = vendor3 >> (8 * i); 706 } 707 dst[CPUID_VENDOR_SZ] = '\0'; 708 } 709 710 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 711 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 712 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 713 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 714 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 715 CPUID_PSE36 | CPUID_FXSR) 716 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 717 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 718 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 719 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 720 CPUID_PAE | CPUID_SEP | CPUID_APIC) 721 722 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 723 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 724 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 725 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 726 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 727 /* partly implemented: 728 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH 
(needed for Win64) */ 729 /* missing: 730 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 731 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 732 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 733 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 734 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 735 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 736 CPUID_EXT_RDRAND) 737 /* missing: 738 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 739 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 740 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 741 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 742 CPUID_EXT_F16C */ 743 744 #ifdef TARGET_X86_64 745 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 746 #else 747 #define TCG_EXT2_X86_64_FEATURES 0 748 #endif 749 750 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 751 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 752 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 753 TCG_EXT2_X86_64_FEATURES) 754 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 755 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 756 #define TCG_EXT4_FEATURES 0 757 #define TCG_SVM_FEATURES CPUID_SVM_NPT 758 #define TCG_KVM_FEATURES 0 759 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 760 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 761 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 762 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 763 CPUID_7_0_EBX_ERMS) 764 /* missing: 765 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 766 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 767 CPUID_7_0_EBX_RDSEED */ 768 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 769 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 770 CPUID_7_0_ECX_LA57) 771 #define TCG_7_0_EDX_FEATURES 0 772 #define 
TCG_APM_FEATURES 0 773 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 774 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 775 /* missing: 776 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 777 778 typedef enum FeatureWordType { 779 CPUID_FEATURE_WORD, 780 MSR_FEATURE_WORD, 781 } FeatureWordType; 782 783 typedef struct FeatureWordInfo { 784 FeatureWordType type; 785 /* feature flags names are taken from "Intel Processor Identification and 786 * the CPUID Instruction" and AMD's "CPUID Specification". 787 * In cases of disagreement between feature naming conventions, 788 * aliases may be added. 789 */ 790 const char *feat_names[32]; 791 union { 792 /* If type==CPUID_FEATURE_WORD */ 793 struct { 794 uint32_t eax; /* Input EAX for CPUID */ 795 bool needs_ecx; /* CPUID instruction uses ECX as input */ 796 uint32_t ecx; /* Input ECX value for CPUID */ 797 int reg; /* output register (R_* constant) */ 798 } cpuid; 799 /* If type==MSR_FEATURE_WORD */ 800 struct { 801 uint32_t index; 802 struct { /*CPUID that enumerate this MSR*/ 803 FeatureWord cpuid_class; 804 uint32_t cpuid_flag; 805 } cpuid_dep; 806 } msr; 807 }; 808 uint32_t tcg_features; /* Feature flags supported by TCG */ 809 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ 810 uint32_t migratable_flags; /* Feature flags known to be migratable */ 811 /* Features that shouldn't be auto-enabled by "-cpu host" */ 812 uint32_t no_autoenable_flags; 813 } FeatureWordInfo; 814 815 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 816 [FEAT_1_EDX] = { 817 .type = CPUID_FEATURE_WORD, 818 .feat_names = { 819 "fpu", "vme", "de", "pse", 820 "tsc", "msr", "pae", "mce", 821 "cx8", "apic", NULL, "sep", 822 "mtrr", "pge", "mca", "cmov", 823 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 824 NULL, "ds" /* Intel dts */, "acpi", "mmx", 825 "fxsr", "sse", "sse2", "ss", 826 "ht" /* Intel htt */, "tm", "ia64", "pbe", 827 }, 828 .cpuid = {.eax = 1, .reg = R_EDX, }, 829 
.tcg_features = TCG_FEATURES, 830 }, 831 [FEAT_1_ECX] = { 832 .type = CPUID_FEATURE_WORD, 833 .feat_names = { 834 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 835 "ds-cpl", "vmx", "smx", "est", 836 "tm2", "ssse3", "cid", NULL, 837 "fma", "cx16", "xtpr", "pdcm", 838 NULL, "pcid", "dca", "sse4.1", 839 "sse4.2", "x2apic", "movbe", "popcnt", 840 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 841 "avx", "f16c", "rdrand", "hypervisor", 842 }, 843 .cpuid = { .eax = 1, .reg = R_ECX, }, 844 .tcg_features = TCG_EXT_FEATURES, 845 }, 846 /* Feature names that are already defined on feature_name[] but 847 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 848 * names on feat_names below. They are copied automatically 849 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 850 */ 851 [FEAT_8000_0001_EDX] = { 852 .type = CPUID_FEATURE_WORD, 853 .feat_names = { 854 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 855 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 856 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 857 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 858 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 859 "nx", NULL, "mmxext", NULL /* mmx */, 860 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 861 NULL, "lm", "3dnowext", "3dnow", 862 }, 863 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 864 .tcg_features = TCG_EXT2_FEATURES, 865 }, 866 [FEAT_8000_0001_ECX] = { 867 .type = CPUID_FEATURE_WORD, 868 .feat_names = { 869 "lahf-lm", "cmp-legacy", "svm", "extapic", 870 "cr8legacy", "abm", "sse4a", "misalignsse", 871 "3dnowprefetch", "osvw", "ibs", "xop", 872 "skinit", "wdt", NULL, "lwp", 873 "fma4", "tce", NULL, "nodeid-msr", 874 NULL, "tbm", "topoext", "perfctr-core", 875 "perfctr-nb", NULL, NULL, NULL, 876 NULL, NULL, NULL, NULL, 877 }, 878 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 879 .tcg_features = TCG_EXT3_FEATURES, 880 /* 881 * TOPOEXT is always 
allowed but can't be enabled blindly by 882 * "-cpu host", as it requires consistent cache topology info 883 * to be provided so it doesn't confuse guests. 884 */ 885 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 886 }, 887 [FEAT_C000_0001_EDX] = { 888 .type = CPUID_FEATURE_WORD, 889 .feat_names = { 890 NULL, NULL, "xstore", "xstore-en", 891 NULL, NULL, "xcrypt", "xcrypt-en", 892 "ace2", "ace2-en", "phe", "phe-en", 893 "pmm", "pmm-en", NULL, NULL, 894 NULL, NULL, NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 NULL, NULL, NULL, NULL, 898 }, 899 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 900 .tcg_features = TCG_EXT4_FEATURES, 901 }, 902 [FEAT_KVM] = { 903 .type = CPUID_FEATURE_WORD, 904 .feat_names = { 905 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 906 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 907 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 908 NULL, NULL, NULL, NULL, 909 NULL, NULL, NULL, NULL, 910 NULL, NULL, NULL, NULL, 911 "kvmclock-stable-bit", NULL, NULL, NULL, 912 NULL, NULL, NULL, NULL, 913 }, 914 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 915 .tcg_features = TCG_KVM_FEATURES, 916 }, 917 [FEAT_KVM_HINTS] = { 918 .type = CPUID_FEATURE_WORD, 919 .feat_names = { 920 "kvm-hint-dedicated", NULL, NULL, NULL, 921 NULL, NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 NULL, NULL, NULL, NULL, 924 NULL, NULL, NULL, NULL, 925 NULL, NULL, NULL, NULL, 926 NULL, NULL, NULL, NULL, 927 NULL, NULL, NULL, NULL, 928 }, 929 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 930 .tcg_features = TCG_KVM_FEATURES, 931 /* 932 * KVM hints aren't auto-enabled by -cpu host, they need to be 933 * explicitly enabled in the command-line. 934 */ 935 .no_autoenable_flags = ~0U, 936 }, 937 /* 938 * .feat_names are commented out for Hyper-V enlightenments because we 939 * don't want to have two different ways for enabling them on QEMU command 940 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) 
require 941 * enabling several feature bits simultaneously, exposing these bits 942 * individually may just confuse guests. 943 */ 944 [FEAT_HYPERV_EAX] = { 945 .type = CPUID_FEATURE_WORD, 946 .feat_names = { 947 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 948 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 949 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 950 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 951 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 952 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 953 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 954 NULL, NULL, 955 NULL, NULL, NULL, NULL, 956 NULL, NULL, NULL, NULL, 957 NULL, NULL, NULL, NULL, 958 NULL, NULL, NULL, NULL, 959 }, 960 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 961 }, 962 [FEAT_HYPERV_EBX] = { 963 .type = CPUID_FEATURE_WORD, 964 .feat_names = { 965 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 966 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 967 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 968 NULL /* hv_create_port */, NULL /* hv_connect_port */, 969 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 970 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 971 NULL, NULL, 972 NULL, NULL, NULL, NULL, 973 NULL, NULL, NULL, NULL, 974 NULL, NULL, NULL, NULL, 975 NULL, NULL, NULL, NULL, 976 }, 977 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 978 }, 979 [FEAT_HYPERV_EDX] = { 980 .type = CPUID_FEATURE_WORD, 981 .feat_names = { 982 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 983 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 984 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 985 NULL, NULL, 986 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 987 NULL, NULL, NULL, NULL, 988 NULL, NULL, NULL, NULL, 989 NULL, NULL, 
NULL, NULL, 990 NULL, NULL, NULL, NULL, 991 NULL, NULL, NULL, NULL, 992 }, 993 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 994 }, 995 [FEAT_HV_RECOMM_EAX] = { 996 .type = CPUID_FEATURE_WORD, 997 .feat_names = { 998 NULL /* hv_recommend_pv_as_switch */, 999 NULL /* hv_recommend_pv_tlbflush_local */, 1000 NULL /* hv_recommend_pv_tlbflush_remote */, 1001 NULL /* hv_recommend_msr_apic_access */, 1002 NULL /* hv_recommend_msr_reset */, 1003 NULL /* hv_recommend_relaxed_timing */, 1004 NULL /* hv_recommend_dma_remapping */, 1005 NULL /* hv_recommend_int_remapping */, 1006 NULL /* hv_recommend_x2apic_msrs */, 1007 NULL /* hv_recommend_autoeoi_deprecation */, 1008 NULL /* hv_recommend_pv_ipi */, 1009 NULL /* hv_recommend_ex_hypercalls */, 1010 NULL /* hv_hypervisor_is_nested */, 1011 NULL /* hv_recommend_int_mbec */, 1012 NULL /* hv_recommend_evmcs */, 1013 NULL, 1014 NULL, NULL, NULL, NULL, 1015 NULL, NULL, NULL, NULL, 1016 NULL, NULL, NULL, NULL, 1017 NULL, NULL, NULL, NULL, 1018 }, 1019 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 1020 }, 1021 [FEAT_HV_NESTED_EAX] = { 1022 .type = CPUID_FEATURE_WORD, 1023 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 1024 }, 1025 [FEAT_SVM] = { 1026 .type = CPUID_FEATURE_WORD, 1027 .feat_names = { 1028 "npt", "lbrv", "svm-lock", "nrip-save", 1029 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 1030 NULL, NULL, "pause-filter", NULL, 1031 "pfthreshold", NULL, NULL, NULL, 1032 NULL, NULL, NULL, NULL, 1033 NULL, NULL, NULL, NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, NULL, NULL, NULL, 1036 }, 1037 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 1038 .tcg_features = TCG_SVM_FEATURES, 1039 }, 1040 [FEAT_7_0_EBX] = { 1041 .type = CPUID_FEATURE_WORD, 1042 .feat_names = { 1043 "fsgsbase", "tsc-adjust", NULL, "bmi1", 1044 "hle", "avx2", NULL, "smep", 1045 "bmi2", "erms", "invpcid", "rtm", 1046 NULL, NULL, "mpx", NULL, 1047 "avx512f", "avx512dq", "rdseed", "adx", 1048 "smap", "avx512ifma", "pcommit", "clflushopt", 1049 "clwb", 
"intel-pt", "avx512pf", "avx512er", 1050 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 1051 }, 1052 .cpuid = { 1053 .eax = 7, 1054 .needs_ecx = true, .ecx = 0, 1055 .reg = R_EBX, 1056 }, 1057 .tcg_features = TCG_7_0_EBX_FEATURES, 1058 }, 1059 [FEAT_7_0_ECX] = { 1060 .type = CPUID_FEATURE_WORD, 1061 .feat_names = { 1062 NULL, "avx512vbmi", "umip", "pku", 1063 NULL /* ospke */, NULL, "avx512vbmi2", NULL, 1064 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 1065 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 1066 "la57", NULL, NULL, NULL, 1067 NULL, NULL, "rdpid", NULL, 1068 NULL, "cldemote", NULL, "movdiri", 1069 "movdir64b", NULL, NULL, NULL, 1070 }, 1071 .cpuid = { 1072 .eax = 7, 1073 .needs_ecx = true, .ecx = 0, 1074 .reg = R_ECX, 1075 }, 1076 .tcg_features = TCG_7_0_ECX_FEATURES, 1077 }, 1078 [FEAT_7_0_EDX] = { 1079 .type = CPUID_FEATURE_WORD, 1080 .feat_names = { 1081 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 1082 NULL, NULL, NULL, NULL, 1083 NULL, NULL, "md-clear", NULL, 1084 NULL, NULL, NULL, NULL, 1085 NULL, NULL, NULL, NULL, 1086 NULL, NULL, NULL, NULL, 1087 NULL, NULL, "spec-ctrl", "stibp", 1088 NULL, "arch-capabilities", "core-capability", "ssbd", 1089 }, 1090 .cpuid = { 1091 .eax = 7, 1092 .needs_ecx = true, .ecx = 0, 1093 .reg = R_EDX, 1094 }, 1095 .tcg_features = TCG_7_0_EDX_FEATURES, 1096 }, 1097 [FEAT_8000_0007_EDX] = { 1098 .type = CPUID_FEATURE_WORD, 1099 .feat_names = { 1100 NULL, NULL, NULL, NULL, 1101 NULL, NULL, NULL, NULL, 1102 "invtsc", NULL, NULL, NULL, 1103 NULL, NULL, NULL, NULL, 1104 NULL, NULL, NULL, NULL, 1105 NULL, NULL, NULL, NULL, 1106 NULL, NULL, NULL, NULL, 1107 NULL, NULL, NULL, NULL, 1108 }, 1109 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1110 .tcg_features = TCG_APM_FEATURES, 1111 .unmigratable_flags = CPUID_APM_INVTSC, 1112 }, 1113 [FEAT_8000_0008_EBX] = { 1114 .type = CPUID_FEATURE_WORD, 1115 .feat_names = { 1116 NULL, NULL, NULL, NULL, 1117 NULL, NULL, NULL, NULL, 1118 NULL, "wbnoinvd", NULL, NULL, 1119 "ibpb", NULL, 
NULL, NULL, 1120 NULL, NULL, NULL, NULL, 1121 NULL, NULL, NULL, NULL, 1122 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1123 NULL, NULL, NULL, NULL, 1124 }, 1125 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1126 .tcg_features = 0, 1127 .unmigratable_flags = 0, 1128 }, 1129 [FEAT_XSAVE] = { 1130 .type = CPUID_FEATURE_WORD, 1131 .feat_names = { 1132 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1133 NULL, NULL, NULL, NULL, 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 NULL, NULL, NULL, NULL, 1138 NULL, NULL, NULL, NULL, 1139 NULL, NULL, NULL, NULL, 1140 }, 1141 .cpuid = { 1142 .eax = 0xd, 1143 .needs_ecx = true, .ecx = 1, 1144 .reg = R_EAX, 1145 }, 1146 .tcg_features = TCG_XSAVE_FEATURES, 1147 }, 1148 [FEAT_6_EAX] = { 1149 .type = CPUID_FEATURE_WORD, 1150 .feat_names = { 1151 NULL, NULL, "arat", NULL, 1152 NULL, NULL, NULL, NULL, 1153 NULL, NULL, NULL, NULL, 1154 NULL, NULL, NULL, NULL, 1155 NULL, NULL, NULL, NULL, 1156 NULL, NULL, NULL, NULL, 1157 NULL, NULL, NULL, NULL, 1158 NULL, NULL, NULL, NULL, 1159 }, 1160 .cpuid = { .eax = 6, .reg = R_EAX, }, 1161 .tcg_features = TCG_6_EAX_FEATURES, 1162 }, 1163 [FEAT_XSAVE_COMP_LO] = { 1164 .type = CPUID_FEATURE_WORD, 1165 .cpuid = { 1166 .eax = 0xD, 1167 .needs_ecx = true, .ecx = 0, 1168 .reg = R_EAX, 1169 }, 1170 .tcg_features = ~0U, 1171 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1172 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1173 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1174 XSTATE_PKRU_MASK, 1175 }, 1176 [FEAT_XSAVE_COMP_HI] = { 1177 .type = CPUID_FEATURE_WORD, 1178 .cpuid = { 1179 .eax = 0xD, 1180 .needs_ecx = true, .ecx = 0, 1181 .reg = R_EDX, 1182 }, 1183 .tcg_features = ~0U, 1184 }, 1185 /*Below are MSR exposed features*/ 1186 [FEAT_ARCH_CAPABILITIES] = { 1187 .type = MSR_FEATURE_WORD, 1188 .feat_names = { 1189 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1190 "ssb-no", "mds-no", NULL, NULL, 1191 NULL, 
            NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
            /* Only valid if CPUID[EAX=7,ECX=0].EDX advertises the MSR */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_ARCH_CAPABILITIES
            }
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
            /* Only valid if CPUID[EAX=7,ECX=0].EDX advertises the MSR */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_CORE_CAPABILITY,
            },
        },
    },
};

/* Mapping from a 32-bit x86 register index to its name and QAPI enum value */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/*
 * Description of one XSAVE state component: the CPUID feature that
 * enables it, plus its offset and size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT component number */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/*
 * Return the size in bytes of an XSAVE area holding the state components
 * selected in @mask (one bit per XSTATE_*_BIT component), i.e. the largest
 * offset + size over all selected components.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

/* True if the current accelerator (KVM or HVF) uses the host CPU's CPUID */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

/* Combine FEAT_XSAVE_COMP_HI/LO into the 64-bit XSAVE components bitmap */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the name of 32-bit register @reg, or NULL if out of range */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

/*
 * Execute the CPUID instruction on the host for leaf @function / subleaf
 * @count and store the results in *@eax..*@edx (any of which may be NULL).
 * Aborts when built for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa preserve all GPRs; results are stored through %esi (vec) */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

/*
 * Read the host CPU's vendor string (from CPUID leaf 0) and family/model/
 * stepping (from CPUID leaf 1) into the caller's buffers; any output
 * pointer may be NULL.  Family and model fold in the extended fields.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family (bits 11:8) + extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (bits 7:4) with extended model (bits 19:16) above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM class for CPU model @cpu_model, or NULL if not found */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/* Strip the X86_CPU_TYPE_SUFFIX from the class name; caller frees result */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* Static definition of one built-in CPU model (builtin_x86_defs entry) */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
};

/* Cache topology advertised by the EPYC CPU model definitions */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
.l2_cache = &(CPUCacheInfo) { 1475 .type = UNIFIED_CACHE, 1476 .level = 2, 1477 .size = 512 * KiB, 1478 .line_size = 64, 1479 .associativity = 8, 1480 .partitions = 1, 1481 .sets = 1024, 1482 .lines_per_tag = 1, 1483 }, 1484 .l3_cache = &(CPUCacheInfo) { 1485 .type = UNIFIED_CACHE, 1486 .level = 3, 1487 .size = 8 * MiB, 1488 .line_size = 64, 1489 .associativity = 16, 1490 .partitions = 1, 1491 .sets = 8192, 1492 .lines_per_tag = 1, 1493 .self_init = true, 1494 .inclusive = true, 1495 .complex_indexing = true, 1496 }, 1497 }; 1498 1499 static X86CPUDefinition builtin_x86_defs[] = { 1500 { 1501 .name = "qemu64", 1502 .level = 0xd, 1503 .vendor = CPUID_VENDOR_AMD, 1504 .family = 6, 1505 .model = 6, 1506 .stepping = 3, 1507 .features[FEAT_1_EDX] = 1508 PPRO_FEATURES | 1509 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1510 CPUID_PSE36, 1511 .features[FEAT_1_ECX] = 1512 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1513 .features[FEAT_8000_0001_EDX] = 1514 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1515 .features[FEAT_8000_0001_ECX] = 1516 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1517 .xlevel = 0x8000000A, 1518 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1519 }, 1520 { 1521 .name = "phenom", 1522 .level = 5, 1523 .vendor = CPUID_VENDOR_AMD, 1524 .family = 16, 1525 .model = 2, 1526 .stepping = 3, 1527 /* Missing: CPUID_HT */ 1528 .features[FEAT_1_EDX] = 1529 PPRO_FEATURES | 1530 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1531 CPUID_PSE36 | CPUID_VME, 1532 .features[FEAT_1_ECX] = 1533 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1534 CPUID_EXT_POPCNT, 1535 .features[FEAT_8000_0001_EDX] = 1536 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1537 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1538 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1539 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1540 CPUID_EXT3_CR8LEG, 1541 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1542 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1543 
.features[FEAT_8000_0001_ECX] = 1544 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1545 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1546 /* Missing: CPUID_SVM_LBRV */ 1547 .features[FEAT_SVM] = 1548 CPUID_SVM_NPT, 1549 .xlevel = 0x8000001A, 1550 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1551 }, 1552 { 1553 .name = "core2duo", 1554 .level = 10, 1555 .vendor = CPUID_VENDOR_INTEL, 1556 .family = 6, 1557 .model = 15, 1558 .stepping = 11, 1559 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1560 .features[FEAT_1_EDX] = 1561 PPRO_FEATURES | 1562 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1563 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1564 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1565 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1566 .features[FEAT_1_ECX] = 1567 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1568 CPUID_EXT_CX16, 1569 .features[FEAT_8000_0001_EDX] = 1570 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1571 .features[FEAT_8000_0001_ECX] = 1572 CPUID_EXT3_LAHF_LM, 1573 .xlevel = 0x80000008, 1574 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1575 }, 1576 { 1577 .name = "kvm64", 1578 .level = 0xd, 1579 .vendor = CPUID_VENDOR_INTEL, 1580 .family = 15, 1581 .model = 6, 1582 .stepping = 1, 1583 /* Missing: CPUID_HT */ 1584 .features[FEAT_1_EDX] = 1585 PPRO_FEATURES | CPUID_VME | 1586 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1587 CPUID_PSE36, 1588 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1589 .features[FEAT_1_ECX] = 1590 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1591 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1592 .features[FEAT_8000_0001_EDX] = 1593 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1594 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1595 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1596 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1597 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1598 .features[FEAT_8000_0001_ECX] = 
1599 0, 1600 .xlevel = 0x80000008, 1601 .model_id = "Common KVM processor" 1602 }, 1603 { 1604 .name = "qemu32", 1605 .level = 4, 1606 .vendor = CPUID_VENDOR_INTEL, 1607 .family = 6, 1608 .model = 6, 1609 .stepping = 3, 1610 .features[FEAT_1_EDX] = 1611 PPRO_FEATURES, 1612 .features[FEAT_1_ECX] = 1613 CPUID_EXT_SSE3, 1614 .xlevel = 0x80000004, 1615 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1616 }, 1617 { 1618 .name = "kvm32", 1619 .level = 5, 1620 .vendor = CPUID_VENDOR_INTEL, 1621 .family = 15, 1622 .model = 6, 1623 .stepping = 1, 1624 .features[FEAT_1_EDX] = 1625 PPRO_FEATURES | CPUID_VME | 1626 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1627 .features[FEAT_1_ECX] = 1628 CPUID_EXT_SSE3, 1629 .features[FEAT_8000_0001_ECX] = 1630 0, 1631 .xlevel = 0x80000008, 1632 .model_id = "Common 32-bit KVM processor" 1633 }, 1634 { 1635 .name = "coreduo", 1636 .level = 10, 1637 .vendor = CPUID_VENDOR_INTEL, 1638 .family = 6, 1639 .model = 14, 1640 .stepping = 8, 1641 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1642 .features[FEAT_1_EDX] = 1643 PPRO_FEATURES | CPUID_VME | 1644 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1645 CPUID_SS, 1646 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1647 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1648 .features[FEAT_1_ECX] = 1649 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1650 .features[FEAT_8000_0001_EDX] = 1651 CPUID_EXT2_NX, 1652 .xlevel = 0x80000008, 1653 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1654 }, 1655 { 1656 .name = "486", 1657 .level = 1, 1658 .vendor = CPUID_VENDOR_INTEL, 1659 .family = 4, 1660 .model = 8, 1661 .stepping = 0, 1662 .features[FEAT_1_EDX] = 1663 I486_FEATURES, 1664 .xlevel = 0, 1665 .model_id = "", 1666 }, 1667 { 1668 .name = "pentium", 1669 .level = 1, 1670 .vendor = CPUID_VENDOR_INTEL, 1671 .family = 5, 1672 .model = 4, 1673 .stepping = 3, 1674 .features[FEAT_1_EDX] = 1675 PENTIUM_FEATURES, 1676 .xlevel = 0, 1677 .model_id = "", 1678 }, 1679 { 1680 .name = 
"pentium2", 1681 .level = 2, 1682 .vendor = CPUID_VENDOR_INTEL, 1683 .family = 6, 1684 .model = 5, 1685 .stepping = 2, 1686 .features[FEAT_1_EDX] = 1687 PENTIUM2_FEATURES, 1688 .xlevel = 0, 1689 .model_id = "", 1690 }, 1691 { 1692 .name = "pentium3", 1693 .level = 3, 1694 .vendor = CPUID_VENDOR_INTEL, 1695 .family = 6, 1696 .model = 7, 1697 .stepping = 3, 1698 .features[FEAT_1_EDX] = 1699 PENTIUM3_FEATURES, 1700 .xlevel = 0, 1701 .model_id = "", 1702 }, 1703 { 1704 .name = "athlon", 1705 .level = 2, 1706 .vendor = CPUID_VENDOR_AMD, 1707 .family = 6, 1708 .model = 2, 1709 .stepping = 3, 1710 .features[FEAT_1_EDX] = 1711 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1712 CPUID_MCA, 1713 .features[FEAT_8000_0001_EDX] = 1714 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1715 .xlevel = 0x80000008, 1716 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1717 }, 1718 { 1719 .name = "n270", 1720 .level = 10, 1721 .vendor = CPUID_VENDOR_INTEL, 1722 .family = 6, 1723 .model = 28, 1724 .stepping = 2, 1725 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1726 .features[FEAT_1_EDX] = 1727 PPRO_FEATURES | 1728 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1729 CPUID_ACPI | CPUID_SS, 1730 /* Some CPUs got no CPUID_SEP */ 1731 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1732 * CPUID_EXT_XTPR */ 1733 .features[FEAT_1_ECX] = 1734 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1735 CPUID_EXT_MOVBE, 1736 .features[FEAT_8000_0001_EDX] = 1737 CPUID_EXT2_NX, 1738 .features[FEAT_8000_0001_ECX] = 1739 CPUID_EXT3_LAHF_LM, 1740 .xlevel = 0x80000008, 1741 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1742 }, 1743 { 1744 .name = "Conroe", 1745 .level = 10, 1746 .vendor = CPUID_VENDOR_INTEL, 1747 .family = 6, 1748 .model = 15, 1749 .stepping = 3, 1750 .features[FEAT_1_EDX] = 1751 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1752 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1753 
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1754 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1755 CPUID_DE | CPUID_FP87, 1756 .features[FEAT_1_ECX] = 1757 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1758 .features[FEAT_8000_0001_EDX] = 1759 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1760 .features[FEAT_8000_0001_ECX] = 1761 CPUID_EXT3_LAHF_LM, 1762 .xlevel = 0x80000008, 1763 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1764 }, 1765 { 1766 .name = "Penryn", 1767 .level = 10, 1768 .vendor = CPUID_VENDOR_INTEL, 1769 .family = 6, 1770 .model = 23, 1771 .stepping = 3, 1772 .features[FEAT_1_EDX] = 1773 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1774 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1775 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1776 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1777 CPUID_DE | CPUID_FP87, 1778 .features[FEAT_1_ECX] = 1779 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1780 CPUID_EXT_SSE3, 1781 .features[FEAT_8000_0001_EDX] = 1782 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1783 .features[FEAT_8000_0001_ECX] = 1784 CPUID_EXT3_LAHF_LM, 1785 .xlevel = 0x80000008, 1786 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1787 }, 1788 { 1789 .name = "Nehalem", 1790 .level = 11, 1791 .vendor = CPUID_VENDOR_INTEL, 1792 .family = 6, 1793 .model = 26, 1794 .stepping = 3, 1795 .features[FEAT_1_EDX] = 1796 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1797 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1798 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1799 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1800 CPUID_DE | CPUID_FP87, 1801 .features[FEAT_1_ECX] = 1802 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1803 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1804 .features[FEAT_8000_0001_EDX] = 1805 CPUID_EXT2_LM | 
CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1806 .features[FEAT_8000_0001_ECX] = 1807 CPUID_EXT3_LAHF_LM, 1808 .xlevel = 0x80000008, 1809 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1810 }, 1811 { 1812 .name = "Nehalem-IBRS", 1813 .level = 11, 1814 .vendor = CPUID_VENDOR_INTEL, 1815 .family = 6, 1816 .model = 26, 1817 .stepping = 3, 1818 .features[FEAT_1_EDX] = 1819 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1820 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1821 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1822 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1823 CPUID_DE | CPUID_FP87, 1824 .features[FEAT_1_ECX] = 1825 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1826 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1827 .features[FEAT_7_0_EDX] = 1828 CPUID_7_0_EDX_SPEC_CTRL, 1829 .features[FEAT_8000_0001_EDX] = 1830 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1831 .features[FEAT_8000_0001_ECX] = 1832 CPUID_EXT3_LAHF_LM, 1833 .xlevel = 0x80000008, 1834 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)", 1835 }, 1836 { 1837 .name = "Westmere", 1838 .level = 11, 1839 .vendor = CPUID_VENDOR_INTEL, 1840 .family = 6, 1841 .model = 44, 1842 .stepping = 1, 1843 .features[FEAT_1_EDX] = 1844 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1845 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1846 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1847 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1848 CPUID_DE | CPUID_FP87, 1849 .features[FEAT_1_ECX] = 1850 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1851 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1852 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1853 .features[FEAT_8000_0001_EDX] = 1854 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1855 .features[FEAT_8000_0001_ECX] = 1856 CPUID_EXT3_LAHF_LM, 1857 .features[FEAT_6_EAX] = 1858 CPUID_6_EAX_ARAT, 
1859 .xlevel = 0x80000008, 1860 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1861 }, 1862 { 1863 .name = "Westmere-IBRS", 1864 .level = 11, 1865 .vendor = CPUID_VENDOR_INTEL, 1866 .family = 6, 1867 .model = 44, 1868 .stepping = 1, 1869 .features[FEAT_1_EDX] = 1870 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1871 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1872 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1873 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1874 CPUID_DE | CPUID_FP87, 1875 .features[FEAT_1_ECX] = 1876 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1877 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1878 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1879 .features[FEAT_8000_0001_EDX] = 1880 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1881 .features[FEAT_8000_0001_ECX] = 1882 CPUID_EXT3_LAHF_LM, 1883 .features[FEAT_7_0_EDX] = 1884 CPUID_7_0_EDX_SPEC_CTRL, 1885 .features[FEAT_6_EAX] = 1886 CPUID_6_EAX_ARAT, 1887 .xlevel = 0x80000008, 1888 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)", 1889 }, 1890 { 1891 .name = "SandyBridge", 1892 .level = 0xd, 1893 .vendor = CPUID_VENDOR_INTEL, 1894 .family = 6, 1895 .model = 42, 1896 .stepping = 1, 1897 .features[FEAT_1_EDX] = 1898 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1899 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1900 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1901 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1902 CPUID_DE | CPUID_FP87, 1903 .features[FEAT_1_ECX] = 1904 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1905 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1906 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1907 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1908 CPUID_EXT_SSE3, 1909 .features[FEAT_8000_0001_EDX] = 1910 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1911 CPUID_EXT2_SYSCALL, 1912 
.features[FEAT_8000_0001_ECX] = 1913 CPUID_EXT3_LAHF_LM, 1914 .features[FEAT_XSAVE] = 1915 CPUID_XSAVE_XSAVEOPT, 1916 .features[FEAT_6_EAX] = 1917 CPUID_6_EAX_ARAT, 1918 .xlevel = 0x80000008, 1919 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1920 }, 1921 { 1922 .name = "SandyBridge-IBRS", 1923 .level = 0xd, 1924 .vendor = CPUID_VENDOR_INTEL, 1925 .family = 6, 1926 .model = 42, 1927 .stepping = 1, 1928 .features[FEAT_1_EDX] = 1929 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1930 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1931 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1932 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1933 CPUID_DE | CPUID_FP87, 1934 .features[FEAT_1_ECX] = 1935 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1936 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1937 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1938 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1939 CPUID_EXT_SSE3, 1940 .features[FEAT_8000_0001_EDX] = 1941 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1942 CPUID_EXT2_SYSCALL, 1943 .features[FEAT_8000_0001_ECX] = 1944 CPUID_EXT3_LAHF_LM, 1945 .features[FEAT_7_0_EDX] = 1946 CPUID_7_0_EDX_SPEC_CTRL, 1947 .features[FEAT_XSAVE] = 1948 CPUID_XSAVE_XSAVEOPT, 1949 .features[FEAT_6_EAX] = 1950 CPUID_6_EAX_ARAT, 1951 .xlevel = 0x80000008, 1952 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)", 1953 }, 1954 { 1955 .name = "IvyBridge", 1956 .level = 0xd, 1957 .vendor = CPUID_VENDOR_INTEL, 1958 .family = 6, 1959 .model = 58, 1960 .stepping = 9, 1961 .features[FEAT_1_EDX] = 1962 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1963 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1964 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1965 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1966 CPUID_DE | CPUID_FP87, 1967 .features[FEAT_1_ECX] = 1968 CPUID_EXT_AVX | 
CPUID_EXT_XSAVE | CPUID_EXT_AES | 1969 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1970 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1971 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1972 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1973 .features[FEAT_7_0_EBX] = 1974 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1975 CPUID_7_0_EBX_ERMS, 1976 .features[FEAT_8000_0001_EDX] = 1977 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1978 CPUID_EXT2_SYSCALL, 1979 .features[FEAT_8000_0001_ECX] = 1980 CPUID_EXT3_LAHF_LM, 1981 .features[FEAT_XSAVE] = 1982 CPUID_XSAVE_XSAVEOPT, 1983 .features[FEAT_6_EAX] = 1984 CPUID_6_EAX_ARAT, 1985 .xlevel = 0x80000008, 1986 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1987 }, 1988 { 1989 .name = "IvyBridge-IBRS", 1990 .level = 0xd, 1991 .vendor = CPUID_VENDOR_INTEL, 1992 .family = 6, 1993 .model = 58, 1994 .stepping = 9, 1995 .features[FEAT_1_EDX] = 1996 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1997 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1998 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1999 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2000 CPUID_DE | CPUID_FP87, 2001 .features[FEAT_1_ECX] = 2002 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2003 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2004 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2005 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2006 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2007 .features[FEAT_7_0_EBX] = 2008 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2009 CPUID_7_0_EBX_ERMS, 2010 .features[FEAT_8000_0001_EDX] = 2011 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2012 CPUID_EXT2_SYSCALL, 2013 .features[FEAT_8000_0001_ECX] = 2014 CPUID_EXT3_LAHF_LM, 2015 .features[FEAT_7_0_EDX] = 2016 CPUID_7_0_EDX_SPEC_CTRL, 2017 .features[FEAT_XSAVE] = 2018 CPUID_XSAVE_XSAVEOPT, 2019 .features[FEAT_6_EAX] = 2020 
CPUID_6_EAX_ARAT, 2021 .xlevel = 0x80000008, 2022 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)", 2023 }, 2024 { 2025 .name = "Haswell-noTSX", 2026 .level = 0xd, 2027 .vendor = CPUID_VENDOR_INTEL, 2028 .family = 6, 2029 .model = 60, 2030 .stepping = 1, 2031 .features[FEAT_1_EDX] = 2032 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2036 CPUID_DE | CPUID_FP87, 2037 .features[FEAT_1_ECX] = 2038 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2039 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2040 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2041 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2042 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2043 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2044 .features[FEAT_8000_0001_EDX] = 2045 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2046 CPUID_EXT2_SYSCALL, 2047 .features[FEAT_8000_0001_ECX] = 2048 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2049 .features[FEAT_7_0_EBX] = 2050 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2051 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2052 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 2053 .features[FEAT_XSAVE] = 2054 CPUID_XSAVE_XSAVEOPT, 2055 .features[FEAT_6_EAX] = 2056 CPUID_6_EAX_ARAT, 2057 .xlevel = 0x80000008, 2058 .model_id = "Intel Core Processor (Haswell, no TSX)", 2059 }, 2060 { 2061 .name = "Haswell-noTSX-IBRS", 2062 .level = 0xd, 2063 .vendor = CPUID_VENDOR_INTEL, 2064 .family = 6, 2065 .model = 60, 2066 .stepping = 1, 2067 .features[FEAT_1_EDX] = 2068 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2069 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2070 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2071 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC 
| CPUID_PSE | 2072 CPUID_DE | CPUID_FP87, 2073 .features[FEAT_1_ECX] = 2074 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2075 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2076 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2077 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2078 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2079 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2080 .features[FEAT_8000_0001_EDX] = 2081 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2082 CPUID_EXT2_SYSCALL, 2083 .features[FEAT_8000_0001_ECX] = 2084 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2085 .features[FEAT_7_0_EDX] = 2086 CPUID_7_0_EDX_SPEC_CTRL, 2087 .features[FEAT_7_0_EBX] = 2088 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2089 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2090 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 2091 .features[FEAT_XSAVE] = 2092 CPUID_XSAVE_XSAVEOPT, 2093 .features[FEAT_6_EAX] = 2094 CPUID_6_EAX_ARAT, 2095 .xlevel = 0x80000008, 2096 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)", 2097 }, 2098 { 2099 .name = "Haswell", 2100 .level = 0xd, 2101 .vendor = CPUID_VENDOR_INTEL, 2102 .family = 6, 2103 .model = 60, 2104 .stepping = 4, 2105 .features[FEAT_1_EDX] = 2106 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2107 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2108 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2109 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2110 CPUID_DE | CPUID_FP87, 2111 .features[FEAT_1_ECX] = 2112 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2113 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2114 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2115 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2116 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2117 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2118 .features[FEAT_8000_0001_EDX] = 2119 CPUID_EXT2_LM | 
CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2120 CPUID_EXT2_SYSCALL, 2121 .features[FEAT_8000_0001_ECX] = 2122 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2123 .features[FEAT_7_0_EBX] = 2124 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2125 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2126 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2127 CPUID_7_0_EBX_RTM, 2128 .features[FEAT_XSAVE] = 2129 CPUID_XSAVE_XSAVEOPT, 2130 .features[FEAT_6_EAX] = 2131 CPUID_6_EAX_ARAT, 2132 .xlevel = 0x80000008, 2133 .model_id = "Intel Core Processor (Haswell)", 2134 }, 2135 { 2136 .name = "Haswell-IBRS", 2137 .level = 0xd, 2138 .vendor = CPUID_VENDOR_INTEL, 2139 .family = 6, 2140 .model = 60, 2141 .stepping = 4, 2142 .features[FEAT_1_EDX] = 2143 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2144 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2145 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2146 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2147 CPUID_DE | CPUID_FP87, 2148 .features[FEAT_1_ECX] = 2149 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2150 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2151 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2152 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2153 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2154 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2155 .features[FEAT_8000_0001_EDX] = 2156 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2157 CPUID_EXT2_SYSCALL, 2158 .features[FEAT_8000_0001_ECX] = 2159 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2160 .features[FEAT_7_0_EDX] = 2161 CPUID_7_0_EDX_SPEC_CTRL, 2162 .features[FEAT_7_0_EBX] = 2163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2166 CPUID_7_0_EBX_RTM, 2167 .features[FEAT_XSAVE] = 2168 CPUID_XSAVE_XSAVEOPT, 2169 
.features[FEAT_6_EAX] = 2170 CPUID_6_EAX_ARAT, 2171 .xlevel = 0x80000008, 2172 .model_id = "Intel Core Processor (Haswell, IBRS)", 2173 }, 2174 { 2175 .name = "Broadwell-noTSX", 2176 .level = 0xd, 2177 .vendor = CPUID_VENDOR_INTEL, 2178 .family = 6, 2179 .model = 61, 2180 .stepping = 2, 2181 .features[FEAT_1_EDX] = 2182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2186 CPUID_DE | CPUID_FP87, 2187 .features[FEAT_1_ECX] = 2188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2194 .features[FEAT_8000_0001_EDX] = 2195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2196 CPUID_EXT2_SYSCALL, 2197 .features[FEAT_8000_0001_ECX] = 2198 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2199 .features[FEAT_7_0_EBX] = 2200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2201 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2203 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2204 CPUID_7_0_EBX_SMAP, 2205 .features[FEAT_XSAVE] = 2206 CPUID_XSAVE_XSAVEOPT, 2207 .features[FEAT_6_EAX] = 2208 CPUID_6_EAX_ARAT, 2209 .xlevel = 0x80000008, 2210 .model_id = "Intel Core Processor (Broadwell, no TSX)", 2211 }, 2212 { 2213 .name = "Broadwell-noTSX-IBRS", 2214 .level = 0xd, 2215 .vendor = CPUID_VENDOR_INTEL, 2216 .family = 6, 2217 .model = 61, 2218 .stepping = 2, 2219 .features[FEAT_1_EDX] = 2220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV 
| CPUID_MCA | 2222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2224 CPUID_DE | CPUID_FP87, 2225 .features[FEAT_1_ECX] = 2226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2227 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2228 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2229 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2230 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2231 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2232 .features[FEAT_8000_0001_EDX] = 2233 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2234 CPUID_EXT2_SYSCALL, 2235 .features[FEAT_8000_0001_ECX] = 2236 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2237 .features[FEAT_7_0_EDX] = 2238 CPUID_7_0_EDX_SPEC_CTRL, 2239 .features[FEAT_7_0_EBX] = 2240 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2241 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2242 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2243 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2244 CPUID_7_0_EBX_SMAP, 2245 .features[FEAT_XSAVE] = 2246 CPUID_XSAVE_XSAVEOPT, 2247 .features[FEAT_6_EAX] = 2248 CPUID_6_EAX_ARAT, 2249 .xlevel = 0x80000008, 2250 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)", 2251 }, 2252 { 2253 .name = "Broadwell", 2254 .level = 0xd, 2255 .vendor = CPUID_VENDOR_INTEL, 2256 .family = 6, 2257 .model = 61, 2258 .stepping = 2, 2259 .features[FEAT_1_EDX] = 2260 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2261 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2262 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2263 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2264 CPUID_DE | CPUID_FP87, 2265 .features[FEAT_1_ECX] = 2266 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2267 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2268 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2269 
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2270 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2271 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2272 .features[FEAT_8000_0001_EDX] = 2273 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2274 CPUID_EXT2_SYSCALL, 2275 .features[FEAT_8000_0001_ECX] = 2276 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2277 .features[FEAT_7_0_EBX] = 2278 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2279 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2280 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2281 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2282 CPUID_7_0_EBX_SMAP, 2283 .features[FEAT_XSAVE] = 2284 CPUID_XSAVE_XSAVEOPT, 2285 .features[FEAT_6_EAX] = 2286 CPUID_6_EAX_ARAT, 2287 .xlevel = 0x80000008, 2288 .model_id = "Intel Core Processor (Broadwell)", 2289 }, 2290 { 2291 .name = "Broadwell-IBRS", 2292 .level = 0xd, 2293 .vendor = CPUID_VENDOR_INTEL, 2294 .family = 6, 2295 .model = 61, 2296 .stepping = 2, 2297 .features[FEAT_1_EDX] = 2298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2302 CPUID_DE | CPUID_FP87, 2303 .features[FEAT_1_ECX] = 2304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2310 .features[FEAT_8000_0001_EDX] = 2311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2312 CPUID_EXT2_SYSCALL, 2313 .features[FEAT_8000_0001_ECX] = 2314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2315 
.features[FEAT_7_0_EDX] = 2316 CPUID_7_0_EDX_SPEC_CTRL, 2317 .features[FEAT_7_0_EBX] = 2318 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2319 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2320 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2321 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2322 CPUID_7_0_EBX_SMAP, 2323 .features[FEAT_XSAVE] = 2324 CPUID_XSAVE_XSAVEOPT, 2325 .features[FEAT_6_EAX] = 2326 CPUID_6_EAX_ARAT, 2327 .xlevel = 0x80000008, 2328 .model_id = "Intel Core Processor (Broadwell, IBRS)", 2329 }, 2330 { 2331 .name = "Skylake-Client", 2332 .level = 0xd, 2333 .vendor = CPUID_VENDOR_INTEL, 2334 .family = 6, 2335 .model = 94, 2336 .stepping = 3, 2337 .features[FEAT_1_EDX] = 2338 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2339 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2340 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2341 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2342 CPUID_DE | CPUID_FP87, 2343 .features[FEAT_1_ECX] = 2344 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2345 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2346 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2347 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2348 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2349 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2350 .features[FEAT_8000_0001_EDX] = 2351 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2352 CPUID_EXT2_SYSCALL, 2353 .features[FEAT_8000_0001_ECX] = 2354 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2355 .features[FEAT_7_0_EBX] = 2356 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2357 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2358 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2359 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2360 CPUID_7_0_EBX_SMAP, 2361 /* Missing: XSAVES (not supported 
by some Linux versions, 2362 * including v4.1 to v4.12). 2363 * KVM doesn't yet expose any XSAVES state save component, 2364 * and the only one defined in Skylake (processor tracing) 2365 * probably will block migration anyway. 2366 */ 2367 .features[FEAT_XSAVE] = 2368 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2369 CPUID_XSAVE_XGETBV1, 2370 .features[FEAT_6_EAX] = 2371 CPUID_6_EAX_ARAT, 2372 .xlevel = 0x80000008, 2373 .model_id = "Intel Core Processor (Skylake)", 2374 }, 2375 { 2376 .name = "Skylake-Client-IBRS", 2377 .level = 0xd, 2378 .vendor = CPUID_VENDOR_INTEL, 2379 .family = 6, 2380 .model = 94, 2381 .stepping = 3, 2382 .features[FEAT_1_EDX] = 2383 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2384 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2385 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2386 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2387 CPUID_DE | CPUID_FP87, 2388 .features[FEAT_1_ECX] = 2389 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2390 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2391 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2392 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2393 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2394 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2395 .features[FEAT_8000_0001_EDX] = 2396 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2397 CPUID_EXT2_SYSCALL, 2398 .features[FEAT_8000_0001_ECX] = 2399 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2400 .features[FEAT_7_0_EDX] = 2401 CPUID_7_0_EDX_SPEC_CTRL, 2402 .features[FEAT_7_0_EBX] = 2403 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2404 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2405 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2406 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2407 CPUID_7_0_EBX_SMAP, 2408 /* Missing: XSAVES (not supported by some Linux versions, 2409 
* including v4.1 to v4.12). 2410 * KVM doesn't yet expose any XSAVES state save component, 2411 * and the only one defined in Skylake (processor tracing) 2412 * probably will block migration anyway. 2413 */ 2414 .features[FEAT_XSAVE] = 2415 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2416 CPUID_XSAVE_XGETBV1, 2417 .features[FEAT_6_EAX] = 2418 CPUID_6_EAX_ARAT, 2419 .xlevel = 0x80000008, 2420 .model_id = "Intel Core Processor (Skylake, IBRS)", 2421 }, 2422 { 2423 .name = "Skylake-Server", 2424 .level = 0xd, 2425 .vendor = CPUID_VENDOR_INTEL, 2426 .family = 6, 2427 .model = 85, 2428 .stepping = 4, 2429 .features[FEAT_1_EDX] = 2430 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2431 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2432 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2433 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2434 CPUID_DE | CPUID_FP87, 2435 .features[FEAT_1_ECX] = 2436 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2437 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2438 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2439 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2440 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2441 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2442 .features[FEAT_8000_0001_EDX] = 2443 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2444 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2445 .features[FEAT_8000_0001_ECX] = 2446 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2447 .features[FEAT_7_0_EBX] = 2448 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2449 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2450 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2451 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2452 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2453 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2454 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2455 
CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2456 .features[FEAT_7_0_ECX] = 2457 CPUID_7_0_ECX_PKU, 2458 /* Missing: XSAVES (not supported by some Linux versions, 2459 * including v4.1 to v4.12). 2460 * KVM doesn't yet expose any XSAVES state save component, 2461 * and the only one defined in Skylake (processor tracing) 2462 * probably will block migration anyway. 2463 */ 2464 .features[FEAT_XSAVE] = 2465 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2466 CPUID_XSAVE_XGETBV1, 2467 .features[FEAT_6_EAX] = 2468 CPUID_6_EAX_ARAT, 2469 .xlevel = 0x80000008, 2470 .model_id = "Intel Xeon Processor (Skylake)", 2471 }, 2472 { 2473 .name = "Skylake-Server-IBRS", 2474 .level = 0xd, 2475 .vendor = CPUID_VENDOR_INTEL, 2476 .family = 6, 2477 .model = 85, 2478 .stepping = 4, 2479 .features[FEAT_1_EDX] = 2480 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2481 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2482 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2483 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2484 CPUID_DE | CPUID_FP87, 2485 .features[FEAT_1_ECX] = 2486 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2487 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2488 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2489 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2490 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2491 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2492 .features[FEAT_8000_0001_EDX] = 2493 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2494 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2495 .features[FEAT_8000_0001_ECX] = 2496 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2497 .features[FEAT_7_0_EDX] = 2498 CPUID_7_0_EDX_SPEC_CTRL, 2499 .features[FEAT_7_0_EBX] = 2500 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2501 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2502 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 
CPUID_7_0_EBX_INVPCID | 2503 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2504 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2505 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2506 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2507 CPUID_7_0_EBX_AVX512VL, 2508 .features[FEAT_7_0_ECX] = 2509 CPUID_7_0_ECX_PKU, 2510 /* Missing: XSAVES (not supported by some Linux versions, 2511 * including v4.1 to v4.12). 2512 * KVM doesn't yet expose any XSAVES state save component, 2513 * and the only one defined in Skylake (processor tracing) 2514 * probably will block migration anyway. 2515 */ 2516 .features[FEAT_XSAVE] = 2517 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2518 CPUID_XSAVE_XGETBV1, 2519 .features[FEAT_6_EAX] = 2520 CPUID_6_EAX_ARAT, 2521 .xlevel = 0x80000008, 2522 .model_id = "Intel Xeon Processor (Skylake, IBRS)", 2523 }, 2524 { 2525 .name = "Cascadelake-Server", 2526 .level = 0xd, 2527 .vendor = CPUID_VENDOR_INTEL, 2528 .family = 6, 2529 .model = 85, 2530 .stepping = 6, 2531 .features[FEAT_1_EDX] = 2532 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2533 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2534 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2535 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2536 CPUID_DE | CPUID_FP87, 2537 .features[FEAT_1_ECX] = 2538 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2539 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2540 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2541 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2542 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2543 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2544 .features[FEAT_8000_0001_EDX] = 2545 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2546 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2547 .features[FEAT_8000_0001_ECX] = 2548 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2549 .features[FEAT_7_0_EBX] = 2550 
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2551 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2552 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2553 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2554 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2555 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2556 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2557 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2558 .features[FEAT_7_0_ECX] = 2559 CPUID_7_0_ECX_PKU | 2560 CPUID_7_0_ECX_AVX512VNNI, 2561 .features[FEAT_7_0_EDX] = 2562 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2563 /* Missing: XSAVES (not supported by some Linux versions, 2564 * including v4.1 to v4.12). 2565 * KVM doesn't yet expose any XSAVES state save component, 2566 * and the only one defined in Skylake (processor tracing) 2567 * probably will block migration anyway. 2568 */ 2569 .features[FEAT_XSAVE] = 2570 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2571 CPUID_XSAVE_XGETBV1, 2572 .features[FEAT_6_EAX] = 2573 CPUID_6_EAX_ARAT, 2574 .xlevel = 0x80000008, 2575 .model_id = "Intel Xeon Processor (Cascadelake)", 2576 }, 2577 { 2578 .name = "Icelake-Client", 2579 .level = 0xd, 2580 .vendor = CPUID_VENDOR_INTEL, 2581 .family = 6, 2582 .model = 126, 2583 .stepping = 0, 2584 .features[FEAT_1_EDX] = 2585 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2586 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2587 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2588 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2589 CPUID_DE | CPUID_FP87, 2590 .features[FEAT_1_ECX] = 2591 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2592 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2593 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2594 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2595 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2596 CPUID_EXT_PCID | CPUID_EXT_F16C | 
CPUID_EXT_RDRAND, 2597 .features[FEAT_8000_0001_EDX] = 2598 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2599 CPUID_EXT2_SYSCALL, 2600 .features[FEAT_8000_0001_ECX] = 2601 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2602 .features[FEAT_8000_0008_EBX] = 2603 CPUID_8000_0008_EBX_WBNOINVD, 2604 .features[FEAT_7_0_EBX] = 2605 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2606 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2607 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2608 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2609 CPUID_7_0_EBX_SMAP, 2610 .features[FEAT_7_0_ECX] = 2611 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2612 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2613 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2614 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2615 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2616 .features[FEAT_7_0_EDX] = 2617 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2618 /* Missing: XSAVES (not supported by some Linux versions, 2619 * including v4.1 to v4.12). 2620 * KVM doesn't yet expose any XSAVES state save component, 2621 * and the only one defined in Skylake (processor tracing) 2622 * probably will block migration anyway. 
2623 */ 2624 .features[FEAT_XSAVE] = 2625 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2626 CPUID_XSAVE_XGETBV1, 2627 .features[FEAT_6_EAX] = 2628 CPUID_6_EAX_ARAT, 2629 .xlevel = 0x80000008, 2630 .model_id = "Intel Core Processor (Icelake)", 2631 }, 2632 { 2633 .name = "Icelake-Server", 2634 .level = 0xd, 2635 .vendor = CPUID_VENDOR_INTEL, 2636 .family = 6, 2637 .model = 134, 2638 .stepping = 0, 2639 .features[FEAT_1_EDX] = 2640 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2641 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2642 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2643 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2644 CPUID_DE | CPUID_FP87, 2645 .features[FEAT_1_ECX] = 2646 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2647 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2648 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2649 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2650 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2651 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2652 .features[FEAT_8000_0001_EDX] = 2653 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2654 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2655 .features[FEAT_8000_0001_ECX] = 2656 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2657 .features[FEAT_8000_0008_EBX] = 2658 CPUID_8000_0008_EBX_WBNOINVD, 2659 .features[FEAT_7_0_EBX] = 2660 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2661 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2662 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2663 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2664 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2665 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2666 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2667 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2668 .features[FEAT_7_0_ECX] = 2669 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | 
CPUID_7_0_ECX_PKU | 2670 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2671 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2672 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2673 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 2674 .features[FEAT_7_0_EDX] = 2675 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2676 /* Missing: XSAVES (not supported by some Linux versions, 2677 * including v4.1 to v4.12). 2678 * KVM doesn't yet expose any XSAVES state save component, 2679 * and the only one defined in Skylake (processor tracing) 2680 * probably will block migration anyway. 2681 */ 2682 .features[FEAT_XSAVE] = 2683 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2684 CPUID_XSAVE_XGETBV1, 2685 .features[FEAT_6_EAX] = 2686 CPUID_6_EAX_ARAT, 2687 .xlevel = 0x80000008, 2688 .model_id = "Intel Xeon Processor (Icelake)", 2689 }, 2690 { 2691 .name = "KnightsMill", 2692 .level = 0xd, 2693 .vendor = CPUID_VENDOR_INTEL, 2694 .family = 6, 2695 .model = 133, 2696 .stepping = 0, 2697 .features[FEAT_1_EDX] = 2698 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2699 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2700 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2701 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2702 CPUID_PSE | CPUID_DE | CPUID_FP87, 2703 .features[FEAT_1_ECX] = 2704 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2705 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2706 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2707 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2708 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2709 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2710 .features[FEAT_8000_0001_EDX] = 2711 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2712 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2713 .features[FEAT_8000_0001_ECX] = 2714 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2715 .features[FEAT_7_0_EBX] = 2716 CPUID_7_0_EBX_FSGSBASE | 
CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2717 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2718 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2719 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2720 CPUID_7_0_EBX_AVX512ER, 2721 .features[FEAT_7_0_ECX] = 2722 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2723 .features[FEAT_7_0_EDX] = 2724 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2725 .features[FEAT_XSAVE] = 2726 CPUID_XSAVE_XSAVEOPT, 2727 .features[FEAT_6_EAX] = 2728 CPUID_6_EAX_ARAT, 2729 .xlevel = 0x80000008, 2730 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2731 }, 2732 { 2733 .name = "Opteron_G1", 2734 .level = 5, 2735 .vendor = CPUID_VENDOR_AMD, 2736 .family = 15, 2737 .model = 6, 2738 .stepping = 1, 2739 .features[FEAT_1_EDX] = 2740 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2741 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2742 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2743 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2744 CPUID_DE | CPUID_FP87, 2745 .features[FEAT_1_ECX] = 2746 CPUID_EXT_SSE3, 2747 .features[FEAT_8000_0001_EDX] = 2748 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2749 .xlevel = 0x80000008, 2750 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2751 }, 2752 { 2753 .name = "Opteron_G2", 2754 .level = 5, 2755 .vendor = CPUID_VENDOR_AMD, 2756 .family = 15, 2757 .model = 6, 2758 .stepping = 1, 2759 .features[FEAT_1_EDX] = 2760 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2761 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2762 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2763 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2764 CPUID_DE | CPUID_FP87, 2765 .features[FEAT_1_ECX] = 2766 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2767 .features[FEAT_8000_0001_EDX] = 2768 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2769 
.features[FEAT_8000_0001_ECX] = 2770 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2771 .xlevel = 0x80000008, 2772 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2773 }, 2774 { 2775 .name = "Opteron_G3", 2776 .level = 5, 2777 .vendor = CPUID_VENDOR_AMD, 2778 .family = 16, 2779 .model = 2, 2780 .stepping = 3, 2781 .features[FEAT_1_EDX] = 2782 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2783 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2784 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2785 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2786 CPUID_DE | CPUID_FP87, 2787 .features[FEAT_1_ECX] = 2788 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2789 CPUID_EXT_SSE3, 2790 .features[FEAT_8000_0001_EDX] = 2791 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 2792 CPUID_EXT2_RDTSCP, 2793 .features[FEAT_8000_0001_ECX] = 2794 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2795 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2796 .xlevel = 0x80000008, 2797 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2798 }, 2799 { 2800 .name = "Opteron_G4", 2801 .level = 0xd, 2802 .vendor = CPUID_VENDOR_AMD, 2803 .family = 21, 2804 .model = 1, 2805 .stepping = 2, 2806 .features[FEAT_1_EDX] = 2807 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2808 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2809 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2810 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2811 CPUID_DE | CPUID_FP87, 2812 .features[FEAT_1_ECX] = 2813 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2814 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2815 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2816 CPUID_EXT_SSE3, 2817 .features[FEAT_8000_0001_EDX] = 2818 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2819 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2820 .features[FEAT_8000_0001_ECX] = 2821 CPUID_EXT3_FMA4 
| CPUID_EXT3_XOP | 2822 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2823 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2824 CPUID_EXT3_LAHF_LM, 2825 .features[FEAT_SVM] = 2826 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2827 /* no xsaveopt! */ 2828 .xlevel = 0x8000001A, 2829 .model_id = "AMD Opteron 62xx class CPU", 2830 }, 2831 { 2832 .name = "Opteron_G5", 2833 .level = 0xd, 2834 .vendor = CPUID_VENDOR_AMD, 2835 .family = 21, 2836 .model = 2, 2837 .stepping = 0, 2838 .features[FEAT_1_EDX] = 2839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2843 CPUID_DE | CPUID_FP87, 2844 .features[FEAT_1_ECX] = 2845 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2846 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2847 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2848 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2849 .features[FEAT_8000_0001_EDX] = 2850 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2851 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2852 .features[FEAT_8000_0001_ECX] = 2853 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2854 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2855 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2856 CPUID_EXT3_LAHF_LM, 2857 .features[FEAT_SVM] = 2858 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2859 /* no xsaveopt! 
*/ 2860 .xlevel = 0x8000001A, 2861 .model_id = "AMD Opteron 63xx class CPU", 2862 }, 2863 { 2864 .name = "EPYC", 2865 .level = 0xd, 2866 .vendor = CPUID_VENDOR_AMD, 2867 .family = 23, 2868 .model = 1, 2869 .stepping = 2, 2870 .features[FEAT_1_EDX] = 2871 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2872 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2873 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2874 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2875 CPUID_VME | CPUID_FP87, 2876 .features[FEAT_1_ECX] = 2877 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2878 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2879 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2880 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2881 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2882 .features[FEAT_8000_0001_EDX] = 2883 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2884 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2885 CPUID_EXT2_SYSCALL, 2886 .features[FEAT_8000_0001_ECX] = 2887 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2888 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2889 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2890 CPUID_EXT3_TOPOEXT, 2891 .features[FEAT_7_0_EBX] = 2892 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2893 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2894 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2895 CPUID_7_0_EBX_SHA_NI, 2896 /* Missing: XSAVES (not supported by some Linux versions, 2897 * including v4.1 to v4.12). 2898 * KVM doesn't yet expose any XSAVES state save component. 
2899 */ 2900 .features[FEAT_XSAVE] = 2901 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2902 CPUID_XSAVE_XGETBV1, 2903 .features[FEAT_6_EAX] = 2904 CPUID_6_EAX_ARAT, 2905 .features[FEAT_SVM] = 2906 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2907 .xlevel = 0x8000001E, 2908 .model_id = "AMD EPYC Processor", 2909 .cache_info = &epyc_cache_info, 2910 }, 2911 { 2912 .name = "EPYC-IBPB", 2913 .level = 0xd, 2914 .vendor = CPUID_VENDOR_AMD, 2915 .family = 23, 2916 .model = 1, 2917 .stepping = 2, 2918 .features[FEAT_1_EDX] = 2919 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2920 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2921 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2922 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2923 CPUID_VME | CPUID_FP87, 2924 .features[FEAT_1_ECX] = 2925 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2926 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2927 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2928 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2929 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2930 .features[FEAT_8000_0001_EDX] = 2931 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2932 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2933 CPUID_EXT2_SYSCALL, 2934 .features[FEAT_8000_0001_ECX] = 2935 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2936 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2937 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2938 CPUID_EXT3_TOPOEXT, 2939 .features[FEAT_8000_0008_EBX] = 2940 CPUID_8000_0008_EBX_IBPB, 2941 .features[FEAT_7_0_EBX] = 2942 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2943 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2944 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2945 CPUID_7_0_EBX_SHA_NI, 2946 /* Missing: XSAVES (not supported by some Linux versions, 2947 * including v4.1 to v4.12). 
2948 * KVM doesn't yet expose any XSAVES state save component. 2949 */ 2950 .features[FEAT_XSAVE] = 2951 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2952 CPUID_XSAVE_XGETBV1, 2953 .features[FEAT_6_EAX] = 2954 CPUID_6_EAX_ARAT, 2955 .features[FEAT_SVM] = 2956 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2957 .xlevel = 0x8000001E, 2958 .model_id = "AMD EPYC Processor (with IBPB)", 2959 .cache_info = &epyc_cache_info, 2960 }, 2961 { 2962 .name = "Dhyana", 2963 .level = 0xd, 2964 .vendor = CPUID_VENDOR_HYGON, 2965 .family = 24, 2966 .model = 0, 2967 .stepping = 1, 2968 .features[FEAT_1_EDX] = 2969 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2970 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2971 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2972 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2973 CPUID_VME | CPUID_FP87, 2974 .features[FEAT_1_ECX] = 2975 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2976 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 2977 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2978 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2979 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 2980 .features[FEAT_8000_0001_EDX] = 2981 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2982 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2983 CPUID_EXT2_SYSCALL, 2984 .features[FEAT_8000_0001_ECX] = 2985 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2986 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2987 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2988 CPUID_EXT3_TOPOEXT, 2989 .features[FEAT_8000_0008_EBX] = 2990 CPUID_8000_0008_EBX_IBPB, 2991 .features[FEAT_7_0_EBX] = 2992 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2993 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2994 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 2995 /* 2996 * Missing: XSAVES (not supported by some Linux versions, 2997 * including v4.1 
to v4.12). 2998 * KVM doesn't yet expose any XSAVES state save component. 2999 */ 3000 .features[FEAT_XSAVE] = 3001 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3002 CPUID_XSAVE_XGETBV1, 3003 .features[FEAT_6_EAX] = 3004 CPUID_6_EAX_ARAT, 3005 .features[FEAT_SVM] = 3006 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3007 .xlevel = 0x8000001E, 3008 .model_id = "Hygon Dhyana Processor", 3009 .cache_info = &epyc_cache_info, 3010 }, 3011 }; 3012 3013 typedef struct PropValue { 3014 const char *prop, *value; 3015 } PropValue; 3016 3017 /* KVM-specific features that are automatically added/removed 3018 * from all CPU models when KVM is enabled. 3019 */ 3020 static PropValue kvm_default_props[] = { 3021 { "kvmclock", "on" }, 3022 { "kvm-nopiodelay", "on" }, 3023 { "kvm-asyncpf", "on" }, 3024 { "kvm-steal-time", "on" }, 3025 { "kvm-pv-eoi", "on" }, 3026 { "kvmclock-stable-bit", "on" }, 3027 { "x2apic", "on" }, 3028 { "acpi", "off" }, 3029 { "monitor", "off" }, 3030 { "svm", "off" }, 3031 { NULL, NULL }, 3032 }; 3033 3034 /* TCG-specific defaults that override all CPU models when using TCG 3035 */ 3036 static PropValue tcg_default_props[] = { 3037 { "vme", "off" }, 3038 { NULL, NULL }, 3039 }; 3040 3041 3042 void x86_cpu_change_kvm_default(const char *prop, const char *value) 3043 { 3044 PropValue *pv; 3045 for (pv = kvm_default_props; pv->prop; pv++) { 3046 if (!strcmp(pv->prop, prop)) { 3047 pv->value = value; 3048 break; 3049 } 3050 } 3051 3052 /* It is valid to call this function only for properties that 3053 * are already present in the kvm_default_props table. 
 */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/*
 * Return true if the host supports Local Machine Check Exceptions
 * (MCG_LMCE_P bit in the KVM-reported MCE capabilities).
 * Always false when built without KVM support.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    /* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
     * 48-byte model ID string in EAX/EBX/ECX/EDX.
     */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init for the "max" CPU model */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Sort after the named CPU models in -cpu help output */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}

static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);

/*
 * Instance init for the "max" CPU model: when a host-CPUID-based
 * accelerator (KVM/HVF) is in use, copy vendor/family/model/stepping
 * and model-id from the host CPU; under TCG fall back to a fixed
 * QEMU-branded identity.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        /* Also take level/xlevel floors from the accelerator so that
         * leaves the host supports are not truncated away.
         */
        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled() implies HVF here */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed synthetic identity */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* "host" is "max" restricted to accelerators that expose host CPUID */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

/*
 * Return a newly-allocated human-readable location for a feature word,
 * e.g. "CPUID.07H:EBX" or "MSR(10AH)".  Caller must g_free() the result.
 */
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    return NULL;
}

/* Warn about every bit set in @mask that the accelerator cannot provide */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;
    char *feat_word_str;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            feat_word_str = feature_word_description(f, i);
            warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        feat_word_str,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
            g_free(feat_word_str);
        }
    }
}

/* "family" property getter: base family (bits 8..11) plus extended
 * family (bits 20..27) when the base field saturates at 0xf.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

/* "family" property setter; range is 0..(0xff + 0xf) because values
 * above 0xf spill into the extended-family field.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ?
                   name : "null", value, min, max);
        return;
    }

    /* Base family lives in bits 8..11; the extended family (bits 20..27)
     * is only meaningful when the base field is saturated at 0xf.
     */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

/* "model" property getter: base model (bits 4..7) combined with
 * extended model (bits 16..19) as the high nibble.
 */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

/* "model" property setter: splits the 8-bit value into base and
 * extended model nibbles of CPUID[1].EAX.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

/* "stepping" property getter: CPUID[1].EAX bits 0..3 */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

/* "stepping" property setter; valid range is a single nibble, 0..0xf */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}

/* "vendor" property getter: decode the three CPUID[0] vendor words into
 * a newly-allocated 12-character string (caller frees).
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

/* "vendor" property setter: the string must be exactly CPUID_VENDOR_SZ
 * (12) characters; it is packed little-endian into the three vendor words.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}

/* "model-id" property getter: unpack the 48-byte model ID from the
 * cpuid_model word array into a newly-allocated string (caller frees).
 */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

/* "model-id" property setter: pack the string into the cpuid_model
 * words, zero-padding to the full 48 bytes.  NULL is treated as "".
 */
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

/* "tsc-frequency" property getter: reported in Hz (stored as kHz) */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/* "tsc-frequency" property setter: accepts Hz, stores kHz in both
 * tsc_khz and user_tsc_khz.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the FeatureWordArray to expose */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* "hv-spinlocks" property getter */
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

/* "hv-spinlocks" property setter; Hyper-V requires at least 0xFFF
 * spinlock retry attempts, hence the unusual minimum.
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e.
 "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper around g_strcmp0 for g_list_find_custom() */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Feature strings are registered as global properties exactly once;
     * subsequent calls are no-ops.
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Bare "feat" is shorthand for "feat=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears in both +/- and key=value form */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts size suffixes (e.g. 2G) and is
         * converted to the plain-integer "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
                                       strList **feat_names)
{
    FeatureWord w;
    strList **next = feat_names;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }
}

/* "unavailable-features" property getter: names of the features that
 * were requested but filtered out by the accelerator.
 */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}

/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    Error *err = NULL;
    strList **next = missing_feats;

    /* host-CPUID-only models are unusable without KVM/HVF */
    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU to run feature expansion/filtering on */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    x86_cpu_list_feature_names(xc->filtered_features, next);

    object_unref(OBJECT(xc));
}

/* Print all cpuid feature names in featureset
 */
static void listflags(GList *features)
{
    size_t len = 0;
    GList *tmp;

    /* Wrap the output before column 75 */
    for (tmp = features; tmp; tmp = tmp->next) {
        const char *name = tmp->data;
        if ((len + strlen(name) + 1) >= 75) {
            qemu_printf("\n");
            len = 0;
        }
        qemu_printf("%s%s", len == 0 ? " " : " ", name);
        len += strlen(name) + 1;
    }
    qemu_printf("\n");
}

/* Sort alphabetically by type name, respecting X86CPUClass::ordering.
/*
 * Sort comparator (GCompareFunc) for CPU model classes: order by the
 * class's 'ordering' field first, then alphabetically by model name.
 */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    char *name_a, *name_b;
    int ret;

    if (cc_a->ordering != cc_b->ordering) {
        ret = cc_a->ordering - cc_b->ordering;
    } else {
        /* Same ordering group: fall back to comparing the model names */
        name_a = x86_cpu_class_get_model_name(cc_a);
        name_b = x86_cpu_class_get_model_name(cc_b);
        ret = strcmp(name_a, name_b);
        g_free(name_a);
        g_free(name_b);
    }
    return ret;
}

/*
 * Return a sorted list of all registered X86 CPU model classes.
 * Caller must free the returned list with g_slist_free().
 */
static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}

/* GFunc callback for x86_cpu_list(): print one CPU model line */
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    char *name = x86_cpu_class_get_model_name(cc);
    /* Prefer the explicit description; fall back to the model_id string */
    const char *desc = cc->model_description;
    if (!desc && cc->cpu_def) {
        desc = cc->cpu_def->model_id;
    }

    qemu_printf("x86 %-20s %-48s\n", name, desc);
    g_free(name);
}

/* list available CPU models and flags */
void x86_cpu_list(void)
{
    int i, j;
    GSList *list;
    GList *names = NULL;

    qemu_printf("Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, NULL);
    g_slist_free(list);

    /* Collect every known feature-flag name across all feature words */
    names = NULL;
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];
        for (j = 0; j < 32; j++) {
            if (fw->feat_names[j]) {
                names = g_list_append(names, (gpointer)fw->feat_names[j]);
            }
        }
    }

    names = g_list_sort(names, (GCompareFunc)strcmp);

    qemu_printf("\nRecognized CPUID flags:\n");
    listflags(names);
    qemu_printf("\n");
    g_list_free(names);
}

/*
 * GFunc callback for qmp_query_cpu_definitions(): build one
 * CpuDefinitionInfo entry and prepend it to the result list.
 */
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

/* QMP query-cpu-definitions: list all CPU models with availability info */
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}

/*
 * Return the feature bits the current accelerator supports for feature
 * word @w.  With no accelerator enabled, report everything (~0) as
 * supported.  If @migratable_only is set, restrict the result to flags
 * that are safe to migrate.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF only reports CPUID-based feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/* Warn about every feature flag that was filtered out as unavailable */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

/*
 * Apply a NULL-terminated array of property name/value pairs to @cpu.
 * Entries whose value was cleared (NULL) are skipped; parse errors
 * abort (&error_abort), since these are built-in tables.
 */
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}

/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}

#ifndef CONFIG_USER_ONLY
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Built once and cached for the lifetime of the process */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* Every named feature flag is also a static property */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}

/*
 * Set each key/value pair of @props as a QOM property on @obj,
 * stopping at the first error.
 */
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                    qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}

/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    /* Resolve host/accelerator-dependent defaults before querying */
    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* object_unref(NULL) is a no-op, so this is safe pre-creation */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}

/*
 * QMP query-cpu-model-expansion: expand @model (name + properties)
 * into an explicit property dictionary.  "static" expansion rebases
 * onto the "base" model; "full" expansion keeps the model name and
 * includes every writable property.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                                                      CpuModelInfo *model,
                                                      Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): this re-adds the static properties for both
     * expansion types; for the STATIC case it repeats the call above
     * (qdict_put replaces keys, so it is harmless) — confirm intended.
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
#endif /* !CONFIG_USER_ONLY */

/* gdb architecture name hook: depends on whether we emulate x86-64 */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}

/* class_init for the per-model CPU types registered below */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    xcc->migration_safe = true;
}

/* Register one QOM type for CPU model definition @def */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);


    type_register(&ti);
    g_free(typename);
}
#if !defined(CONFIG_USER_ONLY)

/* Remove the APIC feature bit (used when no APIC device is present) */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/*
 * Emulate the CPUID instruction for leaf @index / subleaf @count,
 * writing the four result registers through @eax/@ebx/@ecx/@edx.
 * Out-of-range leaves are clamped per the Intel SDM behavior below.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string + maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Family/model/stepping + standard feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4 state, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects guest CR4.PKE, like OSXSAVE above */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        /* EAX[4:0] is the APIC ID shift; anything wider is a bug */
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG nonetheless.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced Power Management */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM feature enumeration */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology, per-level sub-leaves */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core/node IDs) */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* VIA/Centaur extended leaf limit */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: EAX bit 1 = SEV supported; EBX = C-bit location and
         * physical address bit reduction */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but not including) end_reset_fields */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Architectural reset state: CS=F000, base FFFF0000, others zero */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor (per its APIC base MSR) */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/*
 * Initialize MCE capability MSRs if the CPU is family >= 6 and
 * advertises both MCE and MCA; otherwise leave mcg_cap at zero.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation class matching the configured accelerator */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create the per-CPU local APIC device as a QOM child of @cpu */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds the reference; drop ours */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
link<> */ 4902 apic = APIC_COMMON(cpu->apic_state); 4903 apic->cpu = cpu; 4904 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 4905 } 4906 4907 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 4908 { 4909 APICCommonState *apic; 4910 static bool apic_mmio_map_once; 4911 4912 if (cpu->apic_state == NULL) { 4913 return; 4914 } 4915 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized", 4916 errp); 4917 4918 /* Map APIC MMIO area */ 4919 apic = APIC_COMMON(cpu->apic_state); 4920 if (!apic_mmio_map_once) { 4921 memory_region_add_subregion_overlap(get_system_memory(), 4922 apic->apicbase & 4923 MSR_IA32_APICBASE_BASE, 4924 &apic->io_memory, 4925 0x1000); 4926 apic_mmio_map_once = true; 4927 } 4928 } 4929 4930 static void x86_cpu_machine_done(Notifier *n, void *unused) 4931 { 4932 X86CPU *cpu = container_of(n, X86CPU, machine_done); 4933 MemoryRegion *smram = 4934 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 4935 4936 if (smram) { 4937 cpu->smram = g_new(MemoryRegion, 1); 4938 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 4939 smram, 0, 1ull << 32); 4940 memory_region_set_enabled(cpu->smram, true); 4941 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 4942 } 4943 } 4944 #else 4945 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 4946 { 4947 } 4948 #endif 4949 4950 /* Note: Only safe for use on x86(-64) hosts */ 4951 static uint32_t x86_host_phys_bits(void) 4952 { 4953 uint32_t eax; 4954 uint32_t host_phys_bits; 4955 4956 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 4957 if (eax >= 0x80000008) { 4958 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 4959 /* Note: According to AMD doc 25481 rev 2.34 they have a field 4960 * at 23:16 that can specify a maximum physical address bits for 4961 * the guest that can override this value; but I've not seen 4962 * anything with that set. 
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to @value if it is currently smaller.  (@cpu is unused here;
 * kept for signature symmetry with the callers.)
 */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    /* Top nibble of the leaf number selects the CPUID range:
     * 0x0xxxxxxx basic, 0x8xxxxxxx extended, 0xCxxxxxxx Centaur.
     */
    uint32_t region = eax & 0xF0000000;

    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    /* Nothing to do if no feature in this word is enabled. */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    /* Without XSAVE there are no components to advertise. */
    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    /* Build a bitmask with one bit per extended save area whose governing
     * feature bits are all enabled.
     */
    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    /* Publish the mask as CPUID[0xD].EAX/EDX (low/high 32 bits). */
    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved in loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] &
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply "+feature" then "-feature" requests from the -cpu string. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only exposed under KVM with expose_kvm. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
             kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Drop every requested feature bit the host/accelerator cannot provide,
     * recording what was dropped in cpu->filtered_features[].
     */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT needs an exact-capability check against the host's
     * CPUID[0x14] sub-leaves, beyond the plain feature-word mask above.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
            ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
            ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
            ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
            ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                INTEL_PT_ADDR_RANGES_NUM) ||
            ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
            (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}

/*
 * DeviceClass::realize for X86CPU: expands and filters CPUID data, sizes
 * the physical address space, sets up cache info, the APIC and (for TCG)
 * the SMM address space, then starts the vCPU.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    /* Warn at most once per process about unsupported hyperthreading. */
    static bool ht_warned;

    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Report (and with "enforce", reject) features the host can't provide. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                /* ... optionally capped by host-phys-bits-limit. */
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG emulates a fixed physical address width. */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        /* PSE36 extends addressable physical memory to 36 bits. */
        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off requires model-provided cache data. */
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* Create a local APIC when the guest enables it, or whenever SMP
     * requires inter-processor interrupts.
     */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        /* Build a two-address-space layout (normal + SMM) for TCG. */
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* DeviceClass::unrealize for X86CPU: stops the vCPU, unregisters the reset
 * callback and detaches the APIC child before chaining to the parent class.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* Identifies one or more bits of a feature word, backing a QOM bool
 * property (see x86_cpu_register_bit_prop()).
 */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

/* QOM getter: true only when ALL bits in the property's mask are set. */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: sets/clears the masked bits and records them as
 * user-specified so auto-enabling won't override them later.
 */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    /* Feature bits are frozen once the device is realized. */
    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

/* QOM release hook: free the BitProperty allocated in
 * x86_cpu_register_bit_prop().
 */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: extend its mask within the same word. */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

/* Register the QOM bool property for one named feature bit, if the bit has
 * a name in feature_word_info[].
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
5579 * Old names containing underscores are registered as aliases 5580 * using object_property_add_alias() 5581 */ 5582 assert(!strchr(name, '_')); 5583 /* aliases don't use "|" delimiters anymore, they are registered 5584 * manually using object_property_add_alias() */ 5585 assert(!strchr(name, '|')); 5586 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 5587 } 5588 5589 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 5590 { 5591 X86CPU *cpu = X86_CPU(cs); 5592 CPUX86State *env = &cpu->env; 5593 GuestPanicInformation *panic_info = NULL; 5594 5595 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 5596 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 5597 5598 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 5599 5600 assert(HV_CRASH_PARAMS >= 5); 5601 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 5602 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 5603 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 5604 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 5605 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 5606 } 5607 5608 return panic_info; 5609 } 5610 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 5611 const char *name, void *opaque, 5612 Error **errp) 5613 { 5614 CPUState *cs = CPU(obj); 5615 GuestPanicInformation *panic_info; 5616 5617 if (!cs->crash_occurred) { 5618 error_setg(errp, "No crash occured"); 5619 return; 5620 } 5621 5622 panic_info = x86_cpu_get_crash_info(cs); 5623 if (panic_info == NULL) { 5624 error_setg(errp, "No crash information"); 5625 return; 5626 } 5627 5628 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 5629 errp); 5630 qapi_free_GuestPanicInformation(panic_info); 5631 } 5632 5633 static void x86_cpu_initfn(Object *obj) 5634 { 5635 X86CPU *cpu = X86_CPU(obj); 5636 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5637 CPUX86State *env = &cpu->env; 5638 FeatureWord w; 5639 5640 
    cpu_set_cpustate_pointers(cpu);

    /* CPUID version fields and identification strings. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and host-filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One bool property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of feature names. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings, kept for backward compatibility. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* Load the model definition (step 1 of the CPUID setup steps above). */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}

/* The architectural CPU identifier is the APIC ID. */
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

/* Paging is enabled iff CR0.PG is set. */
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

/* Recover EIP from a translation block: tb->pc is CS.base-relative. */
static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

/*
 * Return the highest-priority pending interrupt type from
 * @interrupt_request that the CPU can accept in its current state,
 * or 0 if none can be taken.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below is gated on the (SVM) global interrupt flag. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if
 ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            /* SMI is blocked while already in SMM. */
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   /* Hard interrupts are deliverable either under virtual
                    * interrupt control (V_INTR + host-intercept flag) or,
                    * without it, when EFLAGS.IF is set and no interrupt
                    * shadow is active.
                    */
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

/* A CPU has work whenever some pending interrupt can be taken. */
static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

/* Configure the disassembler for the CPU's current mode (16/32/64-bit),
 * for both the BFD and Capstone backends.
 */
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ?
 CS_MODE_32
                      : CS_MODE_16);
    /* x86 instructions are byte-granular, up to 8 extra bytes long. */
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

/*
 * Recompute env->hflags from the architectural state (CR0, CR4, EFER,
 * EFLAGS and the segment registers), preserving bits outside
 * HFLAG_COPY_MASK.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of SS. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment in long mode. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Legacy/compat mode: derive operand sizes from the D/B bits. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Segment bases matter only when at least one is non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set"; resolved in x86_cpu_expand_features(). */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
5935 */ 5936 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 5937 5938 /* 5939 * From "Requirements for Implementing the Microsoft 5940 * Hypervisor Interface": 5941 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 5942 * 5943 * "Starting with Windows Server 2012 and Windows 8, if 5944 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 5945 * the hypervisor imposes no specific limit to the number of VPs. 5946 * In this case, Windows Server 2012 guest VMs may use more than 5947 * 64 VPs, up to the maximum supported number of processors applicable 5948 * to the specific Windows version being used." 5949 */ 5950 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 5951 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 5952 false), 5953 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 5954 true), 5955 DEFINE_PROP_END_OF_LIST() 5956 }; 5957 5958 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 5959 { 5960 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5961 CPUClass *cc = CPU_CLASS(oc); 5962 DeviceClass *dc = DEVICE_CLASS(oc); 5963 5964 device_class_set_parent_realize(dc, x86_cpu_realizefn, 5965 &xcc->parent_realize); 5966 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 5967 &xcc->parent_unrealize); 5968 dc->props = x86_cpu_properties; 5969 5970 xcc->parent_reset = cc->reset; 5971 cc->reset = x86_cpu_reset; 5972 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 5973 5974 cc->class_by_name = x86_cpu_class_by_name; 5975 cc->parse_features = x86_cpu_parse_featurestr; 5976 cc->has_work = x86_cpu_has_work; 5977 #ifdef CONFIG_TCG 5978 cc->do_interrupt = x86_cpu_do_interrupt; 5979 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 5980 #endif 5981 cc->dump_state = x86_cpu_dump_state; 5982 cc->get_crash_info = x86_cpu_get_crash_info; 5983 cc->set_pc = x86_cpu_set_pc; 5984 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 5985 
cc->gdb_read_register = x86_cpu_gdb_read_register; 5986 cc->gdb_write_register = x86_cpu_gdb_write_register; 5987 cc->get_arch_id = x86_cpu_get_arch_id; 5988 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 5989 #ifndef CONFIG_USER_ONLY 5990 cc->asidx_from_attrs = x86_asidx_from_attrs; 5991 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 5992 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; 5993 cc->write_elf64_note = x86_cpu_write_elf64_note; 5994 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 5995 cc->write_elf32_note = x86_cpu_write_elf32_note; 5996 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 5997 cc->vmsd = &vmstate_x86_cpu; 5998 #endif 5999 cc->gdb_arch_name = x86_gdb_arch_name; 6000 #ifdef TARGET_X86_64 6001 cc->gdb_core_xml_file = "i386-64bit.xml"; 6002 cc->gdb_num_core_regs = 66; 6003 #else 6004 cc->gdb_core_xml_file = "i386-32bit.xml"; 6005 cc->gdb_num_core_regs = 50; 6006 #endif 6007 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 6008 cc->debug_excp_handler = breakpoint_handler; 6009 #endif 6010 cc->cpu_exec_enter = x86_cpu_exec_enter; 6011 cc->cpu_exec_exit = x86_cpu_exec_exit; 6012 #ifdef CONFIG_TCG 6013 cc->tcg_initialize = tcg_x86_init; 6014 cc->tlb_fill = x86_cpu_tlb_fill; 6015 #endif 6016 cc->disas_set_info = x86_disas_set_info; 6017 6018 dc->user_creatable = true; 6019 } 6020 6021 static const TypeInfo x86_cpu_type_info = { 6022 .name = TYPE_X86_CPU, 6023 .parent = TYPE_CPU, 6024 .instance_size = sizeof(X86CPU), 6025 .instance_init = x86_cpu_initfn, 6026 .abstract = true, 6027 .class_size = sizeof(X86CPUClass), 6028 .class_init = x86_cpu_common_class_init, 6029 }; 6030 6031 6032 /* "base" CPU model, used by query-cpu-model-expansion */ 6033 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 6034 { 6035 X86CPUClass *xcc = X86_CPU_CLASS(oc); 6036 6037 xcc->static_model = true; 6038 xcc->migration_safe = true; 6039 xcc->model_description = "base CPU model type with no features enabled"; 6040 
xcc->ordering = 8; 6041 } 6042 6043 static const TypeInfo x86_base_cpu_type_info = { 6044 .name = X86_CPU_TYPE_NAME("base"), 6045 .parent = TYPE_X86_CPU, 6046 .class_init = x86_cpu_base_class_init, 6047 }; 6048 6049 static void x86_cpu_register_types(void) 6050 { 6051 int i; 6052 6053 type_register_static(&x86_cpu_type_info); 6054 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 6055 x86_register_cpudef_type(&builtin_x86_defs[i]); 6056 } 6057 type_register_static(&max_x86_cpu_type_info); 6058 type_register_static(&x86_base_cpu_type_info); 6059 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 6060 type_register_static(&host_x86_cpu_type_info); 6061 #endif 6062 } 6063 6064 type_init(x86_cpu_register_types) 6065