1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/hvf.h" 30 #include "sysemu/cpus.h" 31 #include "kvm_i386.h" 32 #include "sev_i386.h" 33 34 #include "qemu/error-report.h" 35 #include "qemu/option.h" 36 #include "qemu/config-file.h" 37 #include "qapi/error.h" 38 #include "qapi/qapi-visit-misc.h" 39 #include "qapi/qapi-visit-run-state.h" 40 #include "qapi/qmp/qdict.h" 41 #include "qapi/qmp/qerror.h" 42 #include "qapi/visitor.h" 43 #include "qom/qom-qobject.h" 44 #include "sysemu/arch_init.h" 45 #include "qapi/qapi-commands-target.h" 46 47 #include "standard-headers/asm-x86/kvm_para.h" 48 49 #include "sysemu/sysemu.h" 50 #include "hw/qdev-properties.h" 51 #include "hw/i386/topology.h" 52 #ifndef CONFIG_USER_ONLY 53 #include "exec/address-spaces.h" 54 #include "hw/hw.h" 55 #include "hw/xen/xen.h" 56 #include "hw/i386/apic_internal.h" 57 #endif 58 59 #include "disas/capstone.h" 60 61 /* Helpers for building CPUID[2] descriptors: */ 62 63 struct CPUID2CacheDescriptorInfo { 64 enum CacheType type; 65 int level; 66 int 
size; 67 int line_size; 68 int associativity; 69 }; 70 71 /* 72 * Known CPUID 2 cache descriptors. 73 * From Intel SDM Volume 2A, CPUID instruction 74 */ 75 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 76 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 77 .associativity = 4, .line_size = 32, }, 78 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 79 .associativity = 4, .line_size = 32, }, 80 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 81 .associativity = 4, .line_size = 64, }, 82 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 83 .associativity = 2, .line_size = 32, }, 84 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 85 .associativity = 4, .line_size = 32, }, 86 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 87 .associativity = 4, .line_size = 64, }, 88 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 89 .associativity = 6, .line_size = 64, }, 90 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 91 .associativity = 2, .line_size = 64, }, 92 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 93 .associativity = 8, .line_size = 64, }, 94 /* lines per sector is not supported cpuid2_cache_descriptor(), 95 * so descriptors 0x22, 0x23 are not included 96 */ 97 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 98 .associativity = 16, .line_size = 64, }, 99 /* lines per sector is not supported cpuid2_cache_descriptor(), 100 * so descriptors 0x25, 0x20 are not included 101 */ 102 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 103 .associativity = 8, .line_size = 64, }, 104 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 105 .associativity = 8, .line_size = 64, }, 106 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 107 .associativity = 4, .line_size = 32, }, 108 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 109 .associativity = 4, 
.line_size = 32, }, 110 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 111 .associativity = 4, .line_size = 32, }, 112 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 113 .associativity = 4, .line_size = 32, }, 114 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 115 .associativity = 4, .line_size = 32, }, 116 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 117 .associativity = 4, .line_size = 64, }, 118 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 119 .associativity = 8, .line_size = 64, }, 120 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 121 .associativity = 12, .line_size = 64, }, 122 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 123 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 126 .associativity = 16, .line_size = 64, }, 127 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 128 .associativity = 12, .line_size = 64, }, 129 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 130 .associativity = 16, .line_size = 64, }, 131 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 132 .associativity = 24, .line_size = 64, }, 133 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 134 .associativity = 8, .line_size = 64, }, 135 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 136 .associativity = 4, .line_size = 64, }, 137 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 138 .associativity = 4, .line_size = 64, }, 139 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 140 .associativity = 4, .line_size = 64, }, 141 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 142 .associativity = 4, .line_size = 64, }, 143 /* lines per sector is not supported cpuid2_cache_descriptor(), 144 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not 
included. 145 */ 146 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 147 .associativity = 8, .line_size = 64, }, 148 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 149 .associativity = 2, .line_size = 64, }, 150 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 151 .associativity = 8, .line_size = 64, }, 152 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 153 .associativity = 8, .line_size = 32, }, 154 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 155 .associativity = 8, .line_size = 32, }, 156 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 157 .associativity = 8, .line_size = 32, }, 158 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 159 .associativity = 8, .line_size = 32, }, 160 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 161 .associativity = 4, .line_size = 64, }, 162 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 163 .associativity = 8, .line_size = 64, }, 164 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 165 .associativity = 4, .line_size = 64, }, 166 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 167 .associativity = 4, .line_size = 64, }, 168 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 169 .associativity = 4, .line_size = 64, }, 170 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 171 .associativity = 8, .line_size = 64, }, 172 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 173 .associativity = 8, .line_size = 64, }, 174 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 175 .associativity = 8, .line_size = 64, }, 176 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 177 .associativity = 12, .line_size = 64, }, 178 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 179 .associativity = 12, .line_size = 64, }, 180 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 181 
.associativity = 12, .line_size = 64, }, 182 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 183 .associativity = 16, .line_size = 64, }, 184 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 185 .associativity = 16, .line_size = 64, }, 186 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 187 .associativity = 16, .line_size = 64, }, 188 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 189 .associativity = 24, .line_size = 64, }, 190 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 191 .associativity = 24, .line_size = 64, }, 192 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 193 .associativity = 24, .line_size = 64, }, 194 }; 195 196 /* 197 * "CPUID leaf 2 does not report cache descriptor information, 198 * use CPUID leaf 4 to query cache parameters" 199 */ 200 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 201 202 /* 203 * Return a CPUID 2 cache descriptor for a given cache. 204 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 205 */ 206 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 207 { 208 int i; 209 210 assert(cache->size > 0); 211 assert(cache->level > 0); 212 assert(cache->line_size > 0); 213 assert(cache->associativity > 0); 214 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 215 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 216 if (d->level == cache->level && d->type == cache->type && 217 d->size == cache->size && d->line_size == cache->line_size && 218 d->associativity == cache->associativity) { 219 return i; 220 } 221 } 222 223 return CACHE_DESCRIPTOR_UNAVAILABLE; 224 } 225 226 /* CPUID Leaf 4 constants: */ 227 228 /* EAX: */ 229 #define CACHE_TYPE_D 1 230 #define CACHE_TYPE_I 2 231 #define CACHE_TYPE_UNIFIED 3 232 233 #define CACHE_LEVEL(l) (l << 5) 234 235 #define CACHE_SELF_INIT_LEVEL (1 << 8) 236 237 /* EDX: */ 238 #define CACHE_NO_INVD_SHARING (1 << 0) 239 #define CACHE_INCLUSIVE (1 << 1) 240 
#define CACHE_COMPLEX_IDX (1 << 2) 241 242 /* Encode CacheType for CPUID[4].EAX */ 243 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 244 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 245 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 246 0 /* Invalid value */) 247 248 249 /* Encode cache info for CPUID[4] */ 250 static void encode_cache_cpuid4(CPUCacheInfo *cache, 251 int num_apic_ids, int num_cores, 252 uint32_t *eax, uint32_t *ebx, 253 uint32_t *ecx, uint32_t *edx) 254 { 255 assert(cache->size == cache->line_size * cache->associativity * 256 cache->partitions * cache->sets); 257 258 assert(num_apic_ids > 0); 259 *eax = CACHE_TYPE(cache->type) | 260 CACHE_LEVEL(cache->level) | 261 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 262 ((num_cores - 1) << 26) | 263 ((num_apic_ids - 1) << 14); 264 265 assert(cache->line_size > 0); 266 assert(cache->partitions > 0); 267 assert(cache->associativity > 0); 268 /* We don't implement fully-associative caches */ 269 assert(cache->associativity < cache->sets); 270 *ebx = (cache->line_size - 1) | 271 ((cache->partitions - 1) << 12) | 272 ((cache->associativity - 1) << 22); 273 274 assert(cache->sets > 0); 275 *ecx = cache->sets - 1; 276 277 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 278 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 279 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 280 } 281 282 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 283 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 284 { 285 assert(cache->size % 1024 == 0); 286 assert(cache->lines_per_tag > 0); 287 assert(cache->associativity > 0); 288 assert(cache->line_size > 0); 289 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 290 (cache->lines_per_tag << 8) | (cache->line_size); 291 } 292 293 #define ASSOC_FULL 0xFF 294 295 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 296 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 297 a == 2 ? 
0x2 : \ 298 a == 4 ? 0x4 : \ 299 a == 8 ? 0x6 : \ 300 a == 16 ? 0x8 : \ 301 a == 32 ? 0xA : \ 302 a == 48 ? 0xB : \ 303 a == 64 ? 0xC : \ 304 a == 96 ? 0xD : \ 305 a == 128 ? 0xE : \ 306 a == ASSOC_FULL ? 0xF : \ 307 0 /* invalid value */) 308 309 /* 310 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 311 * @l3 can be NULL. 312 */ 313 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 314 CPUCacheInfo *l3, 315 uint32_t *ecx, uint32_t *edx) 316 { 317 assert(l2->size % 1024 == 0); 318 assert(l2->associativity > 0); 319 assert(l2->lines_per_tag > 0); 320 assert(l2->line_size > 0); 321 *ecx = ((l2->size / 1024) << 16) | 322 (AMD_ENC_ASSOC(l2->associativity) << 12) | 323 (l2->lines_per_tag << 8) | (l2->line_size); 324 325 if (l3) { 326 assert(l3->size % (512 * 1024) == 0); 327 assert(l3->associativity > 0); 328 assert(l3->lines_per_tag > 0); 329 assert(l3->line_size > 0); 330 *edx = ((l3->size / (512 * 1024)) << 18) | 331 (AMD_ENC_ASSOC(l3->associativity) << 12) | 332 (l3->lines_per_tag << 8) | (l3->line_size); 333 } else { 334 *edx = 0; 335 } 336 } 337 338 /* 339 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 340 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 341 * Define the constants to build the cpu topology. Right now, TOPOEXT 342 * feature is enabled only on EPYC. So, these constants are based on 343 * EPYC supported configurations. We may need to handle the cases if 344 * these values change in future. 345 */ 346 /* Maximum core complexes in a node */ 347 #define MAX_CCX 2 348 /* Maximum cores in a core complex */ 349 #define MAX_CORES_IN_CCX 4 350 /* Maximum cores in a node */ 351 #define MAX_CORES_IN_NODE 8 352 /* Maximum nodes in a socket */ 353 #define MAX_NODES_PER_SOCKET 4 354 355 /* 356 * Figure out the number of nodes required to build this config. 
357 * Max cores in a node is 8 358 */ 359 static int nodes_in_socket(int nr_cores) 360 { 361 int nodes; 362 363 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); 364 365 /* Hardware does not support config with 3 nodes, return 4 in that case */ 366 return (nodes == 3) ? 4 : nodes; 367 } 368 369 /* 370 * Decide the number of cores in a core complex with the given nr_cores using 371 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and 372 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible 373 * L3 cache is shared across all cores in a core complex. So, this will also 374 * tell us how many cores are sharing the L3 cache. 375 */ 376 static int cores_in_core_complex(int nr_cores) 377 { 378 int nodes; 379 380 /* Check if we can fit all the cores in one core complex */ 381 if (nr_cores <= MAX_CORES_IN_CCX) { 382 return nr_cores; 383 } 384 /* Get the number of nodes required to build this config */ 385 nodes = nodes_in_socket(nr_cores); 386 387 /* 388 * Divide the cores accros all the core complexes 389 * Return rounded up value 390 */ 391 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); 392 } 393 394 /* Encode cache info for CPUID[8000001D] */ 395 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs, 396 uint32_t *eax, uint32_t *ebx, 397 uint32_t *ecx, uint32_t *edx) 398 { 399 uint32_t l3_cores; 400 assert(cache->size == cache->line_size * cache->associativity * 401 cache->partitions * cache->sets); 402 403 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 404 (cache->self_init ? 
CACHE_SELF_INIT_LEVEL : 0); 405 406 /* L3 is shared among multiple cores */ 407 if (cache->level == 3) { 408 l3_cores = cores_in_core_complex(cs->nr_cores); 409 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14; 410 } else { 411 *eax |= ((cs->nr_threads - 1) << 14); 412 } 413 414 assert(cache->line_size > 0); 415 assert(cache->partitions > 0); 416 assert(cache->associativity > 0); 417 /* We don't implement fully-associative caches */ 418 assert(cache->associativity < cache->sets); 419 *ebx = (cache->line_size - 1) | 420 ((cache->partitions - 1) << 12) | 421 ((cache->associativity - 1) << 22); 422 423 assert(cache->sets > 0); 424 *ecx = cache->sets - 1; 425 426 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 427 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 428 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 429 } 430 431 /* Data structure to hold the configuration info for a given core index */ 432 struct core_topology { 433 /* core complex id of the current core index */ 434 int ccx_id; 435 /* 436 * Adjusted core index for this core in the topology 437 * This can be 0,1,2,3 with max 4 cores in a core complex 438 */ 439 int core_id; 440 /* Node id for this core index */ 441 int node_id; 442 /* Number of nodes in this config */ 443 int num_nodes; 444 }; 445 446 /* 447 * Build the configuration closely match the EPYC hardware. Using the EPYC 448 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE) 449 * right now. This could change in future. 
450 * nr_cores : Total number of cores in the config 451 * core_id : Core index of the current CPU 452 * topo : Data structure to hold all the config info for this core index 453 */ 454 static void build_core_topology(int nr_cores, int core_id, 455 struct core_topology *topo) 456 { 457 int nodes, cores_in_ccx; 458 459 /* First get the number of nodes required */ 460 nodes = nodes_in_socket(nr_cores); 461 462 cores_in_ccx = cores_in_core_complex(nr_cores); 463 464 topo->node_id = core_id / (cores_in_ccx * MAX_CCX); 465 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx; 466 topo->core_id = core_id % cores_in_ccx; 467 topo->num_nodes = nodes; 468 } 469 470 /* Encode cache info for CPUID[8000001E] */ 471 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu, 472 uint32_t *eax, uint32_t *ebx, 473 uint32_t *ecx, uint32_t *edx) 474 { 475 struct core_topology topo = {0}; 476 unsigned long nodes; 477 int shift; 478 479 build_core_topology(cs->nr_cores, cpu->core_id, &topo); 480 *eax = cpu->apic_id; 481 /* 482 * CPUID_Fn8000001E_EBX 483 * 31:16 Reserved 484 * 15:8 Threads per core (The number of threads per core is 485 * Threads per core + 1) 486 * 7:0 Core id (see bit decoding below) 487 * SMT: 488 * 4:3 node id 489 * 2 Core complex id 490 * 1:0 Core id 491 * Non SMT: 492 * 5:4 node id 493 * 3 Core complex id 494 * 1:0 Core id 495 */ 496 if (cs->nr_threads - 1) { 497 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) | 498 (topo.ccx_id << 2) | topo.core_id; 499 } else { 500 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id; 501 } 502 /* 503 * CPUID_Fn8000001E_ECX 504 * 31:11 Reserved 505 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 506 * 7:0 Node id (see bit decoding below) 507 * 2 Socket id 508 * 1:0 Node id 509 */ 510 if (topo.num_nodes <= 4) { 511 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | 512 topo.node_id; 513 } else { 514 /* 515 * Node id fix up. 
Actual hardware supports up to 4 nodes. But with 516 * more than 32 cores, we may end up with more than 4 nodes. 517 * Node id is a combination of socket id and node id. Only requirement 518 * here is that this number should be unique accross the system. 519 * Shift the socket id to accommodate more nodes. We dont expect both 520 * socket id and node id to be big number at the same time. This is not 521 * an ideal config but we need to to support it. Max nodes we can have 522 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 523 * 5 bits for nodes. Find the left most set bit to represent the total 524 * number of nodes. find_last_bit returns last set bit(0 based). Left 525 * shift(+1) the socket id to represent all the nodes. 526 */ 527 nodes = topo.num_nodes - 1; 528 shift = find_last_bit(&nodes, 8); 529 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) | 530 topo.node_id; 531 } 532 *edx = 0; 533 } 534 535 /* 536 * Definitions of the hardcoded cache entries we expose: 537 * These are legacy cache values. 
 * If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: entry counts and associativity exposed on the AMD
 * CPUID 0x80000005/0x80000006 leaves (associativity 0 means "disabled").
 */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *
IA32_RTIT_OUTPUT_MASK_PTRS; 685 * bit[02]: Support Single-Range Output scheme; 686 */ 687 #define INTEL_PT_MINIMAL_ECX 0x7 688 /* generated packets which contain IP payloads have LIP values */ 689 #define INTEL_PT_IP_LIP (1 << 31) 690 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 691 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 692 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 693 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 694 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 695 696 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 697 uint32_t vendor2, uint32_t vendor3) 698 { 699 int i; 700 for (i = 0; i < 4; i++) { 701 dst[i] = vendor1 >> (8 * i); 702 dst[i + 4] = vendor2 >> (8 * i); 703 dst[i + 8] = vendor3 >> (8 * i); 704 } 705 dst[CPUID_VENDOR_SZ] = '\0'; 706 } 707 708 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 709 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 710 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 711 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 712 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 713 CPUID_PSE36 | CPUID_FXSR) 714 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 715 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 716 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 717 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 718 CPUID_PAE | CPUID_SEP | CPUID_APIC) 719 720 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 721 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 722 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 723 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 724 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 725 /* partly implemented: 726 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH 
(needed for Win64) */ 727 /* missing: 728 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 729 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 730 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 731 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 732 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 733 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 734 CPUID_EXT_RDRAND) 735 /* missing: 736 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 737 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 738 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 739 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 740 CPUID_EXT_F16C */ 741 742 #ifdef TARGET_X86_64 743 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 744 #else 745 #define TCG_EXT2_X86_64_FEATURES 0 746 #endif 747 748 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 749 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 750 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 751 TCG_EXT2_X86_64_FEATURES) 752 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 753 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 754 #define TCG_EXT4_FEATURES 0 755 #define TCG_SVM_FEATURES CPUID_SVM_NPT 756 #define TCG_KVM_FEATURES 0 757 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 758 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 759 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 760 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 761 CPUID_7_0_EBX_ERMS) 762 /* missing: 763 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 764 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 765 CPUID_7_0_EBX_RDSEED */ 766 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 767 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 768 CPUID_7_0_ECX_LA57) 769 #define TCG_7_0_EDX_FEATURES 0 770 #define 
TCG_APM_FEATURES 0 771 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 772 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 773 /* missing: 774 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 775 776 typedef enum FeatureWordType { 777 CPUID_FEATURE_WORD, 778 MSR_FEATURE_WORD, 779 } FeatureWordType; 780 781 typedef struct FeatureWordInfo { 782 FeatureWordType type; 783 /* feature flags names are taken from "Intel Processor Identification and 784 * the CPUID Instruction" and AMD's "CPUID Specification". 785 * In cases of disagreement between feature naming conventions, 786 * aliases may be added. 787 */ 788 const char *feat_names[32]; 789 union { 790 /* If type==CPUID_FEATURE_WORD */ 791 struct { 792 uint32_t eax; /* Input EAX for CPUID */ 793 bool needs_ecx; /* CPUID instruction uses ECX as input */ 794 uint32_t ecx; /* Input ECX value for CPUID */ 795 int reg; /* output register (R_* constant) */ 796 } cpuid; 797 /* If type==MSR_FEATURE_WORD */ 798 struct { 799 uint32_t index; 800 struct { /*CPUID that enumerate this MSR*/ 801 FeatureWord cpuid_class; 802 uint32_t cpuid_flag; 803 } cpuid_dep; 804 } msr; 805 }; 806 uint32_t tcg_features; /* Feature flags supported by TCG */ 807 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ 808 uint32_t migratable_flags; /* Feature flags known to be migratable */ 809 /* Features that shouldn't be auto-enabled by "-cpu host" */ 810 uint32_t no_autoenable_flags; 811 } FeatureWordInfo; 812 813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 814 [FEAT_1_EDX] = { 815 .type = CPUID_FEATURE_WORD, 816 .feat_names = { 817 "fpu", "vme", "de", "pse", 818 "tsc", "msr", "pae", "mce", 819 "cx8", "apic", NULL, "sep", 820 "mtrr", "pge", "mca", "cmov", 821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 822 NULL, "ds" /* Intel dts */, "acpi", "mmx", 823 "fxsr", "sse", "sse2", "ss", 824 "ht" /* Intel htt */, "tm", "ia64", "pbe", 825 }, 826 .cpuid = {.eax = 1, .reg = R_EDX, }, 827 
.tcg_features = TCG_FEATURES, 828 }, 829 [FEAT_1_ECX] = { 830 .type = CPUID_FEATURE_WORD, 831 .feat_names = { 832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 833 "ds-cpl", "vmx", "smx", "est", 834 "tm2", "ssse3", "cid", NULL, 835 "fma", "cx16", "xtpr", "pdcm", 836 NULL, "pcid", "dca", "sse4.1", 837 "sse4.2", "x2apic", "movbe", "popcnt", 838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 839 "avx", "f16c", "rdrand", "hypervisor", 840 }, 841 .cpuid = { .eax = 1, .reg = R_ECX, }, 842 .tcg_features = TCG_EXT_FEATURES, 843 }, 844 /* Feature names that are already defined on feature_name[] but 845 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 846 * names on feat_names below. They are copied automatically 847 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 848 */ 849 [FEAT_8000_0001_EDX] = { 850 .type = CPUID_FEATURE_WORD, 851 .feat_names = { 852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 857 "nx", NULL, "mmxext", NULL /* mmx */, 858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 859 NULL, "lm", "3dnowext", "3dnow", 860 }, 861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 862 .tcg_features = TCG_EXT2_FEATURES, 863 }, 864 [FEAT_8000_0001_ECX] = { 865 .type = CPUID_FEATURE_WORD, 866 .feat_names = { 867 "lahf-lm", "cmp-legacy", "svm", "extapic", 868 "cr8legacy", "abm", "sse4a", "misalignsse", 869 "3dnowprefetch", "osvw", "ibs", "xop", 870 "skinit", "wdt", NULL, "lwp", 871 "fma4", "tce", NULL, "nodeid-msr", 872 NULL, "tbm", "topoext", "perfctr-core", 873 "perfctr-nb", NULL, NULL, NULL, 874 NULL, NULL, NULL, NULL, 875 }, 876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 877 .tcg_features = TCG_EXT3_FEATURES, 878 /* 879 * TOPOEXT is always 
allowed but can't be enabled blindly by 880 * "-cpu host", as it requires consistent cache topology info 881 * to be provided so it doesn't confuse guests. 882 */ 883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 884 }, 885 [FEAT_C000_0001_EDX] = { 886 .type = CPUID_FEATURE_WORD, 887 .feat_names = { 888 NULL, NULL, "xstore", "xstore-en", 889 NULL, NULL, "xcrypt", "xcrypt-en", 890 "ace2", "ace2-en", "phe", "phe-en", 891 "pmm", "pmm-en", NULL, NULL, 892 NULL, NULL, NULL, NULL, 893 NULL, NULL, NULL, NULL, 894 NULL, NULL, NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 }, 897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 898 .tcg_features = TCG_EXT4_FEATURES, 899 }, 900 [FEAT_KVM] = { 901 .type = CPUID_FEATURE_WORD, 902 .feat_names = { 903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 906 NULL, NULL, NULL, NULL, 907 NULL, NULL, NULL, NULL, 908 NULL, NULL, NULL, NULL, 909 "kvmclock-stable-bit", NULL, NULL, NULL, 910 NULL, NULL, NULL, NULL, 911 }, 912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 913 .tcg_features = TCG_KVM_FEATURES, 914 }, 915 [FEAT_KVM_HINTS] = { 916 .type = CPUID_FEATURE_WORD, 917 .feat_names = { 918 "kvm-hint-dedicated", NULL, NULL, NULL, 919 NULL, NULL, NULL, NULL, 920 NULL, NULL, NULL, NULL, 921 NULL, NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 NULL, NULL, NULL, NULL, 924 NULL, NULL, NULL, NULL, 925 NULL, NULL, NULL, NULL, 926 }, 927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 928 .tcg_features = TCG_KVM_FEATURES, 929 /* 930 * KVM hints aren't auto-enabled by -cpu host, they need to be 931 * explicitly enabled in the command-line. 932 */ 933 .no_autoenable_flags = ~0U, 934 }, 935 /* 936 * .feat_names are commented out for Hyper-V enlightenments because we 937 * don't want to have two different ways for enabling them on QEMU command 938 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) 
require 939 * enabling several feature bits simultaneously, exposing these bits 940 * individually may just confuse guests. 941 */ 942 [FEAT_HYPERV_EAX] = { 943 .type = CPUID_FEATURE_WORD, 944 .feat_names = { 945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 952 NULL, NULL, 953 NULL, NULL, NULL, NULL, 954 NULL, NULL, NULL, NULL, 955 NULL, NULL, NULL, NULL, 956 NULL, NULL, NULL, NULL, 957 }, 958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 959 }, 960 [FEAT_HYPERV_EBX] = { 961 .type = CPUID_FEATURE_WORD, 962 .feat_names = { 963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 965 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 966 NULL /* hv_create_port */, NULL /* hv_connect_port */, 967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 969 NULL, NULL, 970 NULL, NULL, NULL, NULL, 971 NULL, NULL, NULL, NULL, 972 NULL, NULL, NULL, NULL, 973 NULL, NULL, NULL, NULL, 974 }, 975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 976 }, 977 [FEAT_HYPERV_EDX] = { 978 .type = CPUID_FEATURE_WORD, 979 .feat_names = { 980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 983 NULL, NULL, 984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 985 NULL, NULL, NULL, NULL, 986 NULL, NULL, NULL, NULL, 987 NULL, NULL, 
NULL, NULL, 988 NULL, NULL, NULL, NULL, 989 NULL, NULL, NULL, NULL, 990 }, 991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 992 }, 993 [FEAT_HV_RECOMM_EAX] = { 994 .type = CPUID_FEATURE_WORD, 995 .feat_names = { 996 NULL /* hv_recommend_pv_as_switch */, 997 NULL /* hv_recommend_pv_tlbflush_local */, 998 NULL /* hv_recommend_pv_tlbflush_remote */, 999 NULL /* hv_recommend_msr_apic_access */, 1000 NULL /* hv_recommend_msr_reset */, 1001 NULL /* hv_recommend_relaxed_timing */, 1002 NULL /* hv_recommend_dma_remapping */, 1003 NULL /* hv_recommend_int_remapping */, 1004 NULL /* hv_recommend_x2apic_msrs */, 1005 NULL /* hv_recommend_autoeoi_deprecation */, 1006 NULL /* hv_recommend_pv_ipi */, 1007 NULL /* hv_recommend_ex_hypercalls */, 1008 NULL /* hv_hypervisor_is_nested */, 1009 NULL /* hv_recommend_int_mbec */, 1010 NULL /* hv_recommend_evmcs */, 1011 NULL, 1012 NULL, NULL, NULL, NULL, 1013 NULL, NULL, NULL, NULL, 1014 NULL, NULL, NULL, NULL, 1015 NULL, NULL, NULL, NULL, 1016 }, 1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 1018 }, 1019 [FEAT_HV_NESTED_EAX] = { 1020 .type = CPUID_FEATURE_WORD, 1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 1022 }, 1023 [FEAT_SVM] = { 1024 .type = CPUID_FEATURE_WORD, 1025 .feat_names = { 1026 "npt", "lbrv", "svm-lock", "nrip-save", 1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 1028 NULL, NULL, "pause-filter", NULL, 1029 "pfthreshold", NULL, NULL, NULL, 1030 NULL, NULL, NULL, NULL, 1031 NULL, NULL, NULL, NULL, 1032 NULL, NULL, NULL, NULL, 1033 NULL, NULL, NULL, NULL, 1034 }, 1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 1036 .tcg_features = TCG_SVM_FEATURES, 1037 }, 1038 [FEAT_7_0_EBX] = { 1039 .type = CPUID_FEATURE_WORD, 1040 .feat_names = { 1041 "fsgsbase", "tsc-adjust", NULL, "bmi1", 1042 "hle", "avx2", NULL, "smep", 1043 "bmi2", "erms", "invpcid", "rtm", 1044 NULL, NULL, "mpx", NULL, 1045 "avx512f", "avx512dq", "rdseed", "adx", 1046 "smap", "avx512ifma", "pcommit", "clflushopt", 1047 "clwb", 
"intel-pt", "avx512pf", "avx512er", 1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 1049 }, 1050 .cpuid = { 1051 .eax = 7, 1052 .needs_ecx = true, .ecx = 0, 1053 .reg = R_EBX, 1054 }, 1055 .tcg_features = TCG_7_0_EBX_FEATURES, 1056 }, 1057 [FEAT_7_0_ECX] = { 1058 .type = CPUID_FEATURE_WORD, 1059 .feat_names = { 1060 NULL, "avx512vbmi", "umip", "pku", 1061 NULL /* ospke */, NULL, "avx512vbmi2", NULL, 1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 1064 "la57", NULL, NULL, NULL, 1065 NULL, NULL, "rdpid", NULL, 1066 NULL, "cldemote", NULL, "movdiri", 1067 "movdir64b", NULL, NULL, NULL, 1068 }, 1069 .cpuid = { 1070 .eax = 7, 1071 .needs_ecx = true, .ecx = 0, 1072 .reg = R_ECX, 1073 }, 1074 .tcg_features = TCG_7_0_ECX_FEATURES, 1075 }, 1076 [FEAT_7_0_EDX] = { 1077 .type = CPUID_FEATURE_WORD, 1078 .feat_names = { 1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 1080 NULL, NULL, NULL, NULL, 1081 NULL, NULL, "md-clear", NULL, 1082 NULL, NULL, NULL, NULL, 1083 NULL, NULL, NULL, NULL, 1084 NULL, NULL, NULL, NULL, 1085 NULL, NULL, "spec-ctrl", "stibp", 1086 NULL, "arch-capabilities", NULL, "ssbd", 1087 }, 1088 .cpuid = { 1089 .eax = 7, 1090 .needs_ecx = true, .ecx = 0, 1091 .reg = R_EDX, 1092 }, 1093 .tcg_features = TCG_7_0_EDX_FEATURES, 1094 }, 1095 [FEAT_8000_0007_EDX] = { 1096 .type = CPUID_FEATURE_WORD, 1097 .feat_names = { 1098 NULL, NULL, NULL, NULL, 1099 NULL, NULL, NULL, NULL, 1100 "invtsc", NULL, NULL, NULL, 1101 NULL, NULL, NULL, NULL, 1102 NULL, NULL, NULL, NULL, 1103 NULL, NULL, NULL, NULL, 1104 NULL, NULL, NULL, NULL, 1105 NULL, NULL, NULL, NULL, 1106 }, 1107 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1108 .tcg_features = TCG_APM_FEATURES, 1109 .unmigratable_flags = CPUID_APM_INVTSC, 1110 }, 1111 [FEAT_8000_0008_EBX] = { 1112 .type = CPUID_FEATURE_WORD, 1113 .feat_names = { 1114 NULL, NULL, NULL, NULL, 1115 NULL, NULL, NULL, NULL, 1116 NULL, "wbnoinvd", NULL, NULL, 1117 "ibpb", NULL, NULL, NULL, 1118 
NULL, NULL, NULL, NULL, 1119 NULL, NULL, NULL, NULL, 1120 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1121 NULL, NULL, NULL, NULL, 1122 }, 1123 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1124 .tcg_features = 0, 1125 .unmigratable_flags = 0, 1126 }, 1127 [FEAT_XSAVE] = { 1128 .type = CPUID_FEATURE_WORD, 1129 .feat_names = { 1130 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1131 NULL, NULL, NULL, NULL, 1132 NULL, NULL, NULL, NULL, 1133 NULL, NULL, NULL, NULL, 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 NULL, NULL, NULL, NULL, 1138 }, 1139 .cpuid = { 1140 .eax = 0xd, 1141 .needs_ecx = true, .ecx = 1, 1142 .reg = R_EAX, 1143 }, 1144 .tcg_features = TCG_XSAVE_FEATURES, 1145 }, 1146 [FEAT_6_EAX] = { 1147 .type = CPUID_FEATURE_WORD, 1148 .feat_names = { 1149 NULL, NULL, "arat", NULL, 1150 NULL, NULL, NULL, NULL, 1151 NULL, NULL, NULL, NULL, 1152 NULL, NULL, NULL, NULL, 1153 NULL, NULL, NULL, NULL, 1154 NULL, NULL, NULL, NULL, 1155 NULL, NULL, NULL, NULL, 1156 NULL, NULL, NULL, NULL, 1157 }, 1158 .cpuid = { .eax = 6, .reg = R_EAX, }, 1159 .tcg_features = TCG_6_EAX_FEATURES, 1160 }, 1161 [FEAT_XSAVE_COMP_LO] = { 1162 .type = CPUID_FEATURE_WORD, 1163 .cpuid = { 1164 .eax = 0xD, 1165 .needs_ecx = true, .ecx = 0, 1166 .reg = R_EAX, 1167 }, 1168 .tcg_features = ~0U, 1169 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1170 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1171 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1172 XSTATE_PKRU_MASK, 1173 }, 1174 [FEAT_XSAVE_COMP_HI] = { 1175 .type = CPUID_FEATURE_WORD, 1176 .cpuid = { 1177 .eax = 0xD, 1178 .needs_ecx = true, .ecx = 0, 1179 .reg = R_EDX, 1180 }, 1181 .tcg_features = ~0U, 1182 }, 1183 /*Below are MSR exposed features*/ 1184 [FEAT_ARCH_CAPABILITIES] = { 1185 .type = MSR_FEATURE_WORD, 1186 .feat_names = { 1187 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1188 "ssb-no", "mds-no", NULL, NULL, 1189 NULL, NULL, NULL, NULL, 
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
            /* Only expose this MSR word if CPUID[7].EDX.ARCH_CAPABILITIES is set */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_ARCH_CAPABILITIES
            }
        },
    },
};

/* Mapping from x86 32-bit register index to its name and QAPI enum value */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/*
 * Description of one XSAVE state component: the CPUID feature bit that
 * makes it available, and its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    /* feature: FeatureWord index; bits: feature flag(s) gating the component */
    uint32_t feature, bits;
    /* offset/size of the component inside the standard-format XSAVE area */
    uint32_t offset, size;
} ExtSaveArea;

/* Table of XSAVE state components, indexed by XSTATE_*_BIT */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/*
 * Return the size in bytes of the XSAVE area required to hold every state
 * component whose bit is set in @mask: the maximum of offset + size over
 * the enabled entries of x86_ext_save_areas.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

/* True when the accelerator (KVM or HVF) derives CPUID data from the host */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

/* Combine FEAT_XSAVE_COMP_{HI,LO} into the 64-bit XCR0 component bitmap */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the name of 32-bit register @reg, or NULL if @reg is out of range */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
1313 */ 1314 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w) 1315 { 1316 FeatureWordInfo *wi = &feature_word_info[w]; 1317 uint32_t r = 0; 1318 int i; 1319 1320 for (i = 0; i < 32; i++) { 1321 uint32_t f = 1U << i; 1322 1323 /* If the feature name is known, it is implicitly considered migratable, 1324 * unless it is explicitly set in unmigratable_flags */ 1325 if ((wi->migratable_flags & f) || 1326 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1327 r |= f; 1328 } 1329 } 1330 return r; 1331 } 1332 1333 void host_cpuid(uint32_t function, uint32_t count, 1334 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1335 { 1336 uint32_t vec[4]; 1337 1338 #ifdef __x86_64__ 1339 asm volatile("cpuid" 1340 : "=a"(vec[0]), "=b"(vec[1]), 1341 "=c"(vec[2]), "=d"(vec[3]) 1342 : "0"(function), "c"(count) : "cc"); 1343 #elif defined(__i386__) 1344 asm volatile("pusha \n\t" 1345 "cpuid \n\t" 1346 "mov %%eax, 0(%2) \n\t" 1347 "mov %%ebx, 4(%2) \n\t" 1348 "mov %%ecx, 8(%2) \n\t" 1349 "mov %%edx, 12(%2) \n\t" 1350 "popa" 1351 : : "a"(function), "c"(count), "S"(vec) 1352 : "memory", "cc"); 1353 #else 1354 abort(); 1355 #endif 1356 1357 if (eax) 1358 *eax = vec[0]; 1359 if (ebx) 1360 *ebx = vec[1]; 1361 if (ecx) 1362 *ecx = vec[2]; 1363 if (edx) 1364 *edx = vec[3]; 1365 } 1366 1367 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1368 { 1369 uint32_t eax, ebx, ecx, edx; 1370 1371 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1372 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1373 1374 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1375 if (family) { 1376 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1377 } 1378 if (model) { 1379 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1380 } 1381 if (stepping) { 1382 *stepping = eax & 0x0F; 1383 } 1384 } 1385 1386 /* CPU class name definitions: */ 1387 1388 /* Return type name for a given CPU model name 1389 * Caller is responsible for freeing the returned string. 
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM class for CPU model @cpu_model; NULL if not registered */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/*
 * Return the CPU model name for class @cc by stripping the
 * X86_CPU_TYPE_SUFFIX from its QOM class name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* Static description of one built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    /* optional cache topology; NULL means use default cache info */
    CPUCaches *cache_info;
};

/* Cache topology advertised by the EPYC CPU models */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
1467 .associativity = 16, 1468 .partitions = 1, 1469 .sets = 8192, 1470 .lines_per_tag = 1, 1471 .self_init = true, 1472 .inclusive = true, 1473 .complex_indexing = true, 1474 }, 1475 }; 1476 1477 static X86CPUDefinition builtin_x86_defs[] = { 1478 { 1479 .name = "qemu64", 1480 .level = 0xd, 1481 .vendor = CPUID_VENDOR_AMD, 1482 .family = 6, 1483 .model = 6, 1484 .stepping = 3, 1485 .features[FEAT_1_EDX] = 1486 PPRO_FEATURES | 1487 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1488 CPUID_PSE36, 1489 .features[FEAT_1_ECX] = 1490 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1491 .features[FEAT_8000_0001_EDX] = 1492 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1493 .features[FEAT_8000_0001_ECX] = 1494 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1495 .xlevel = 0x8000000A, 1496 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1497 }, 1498 { 1499 .name = "phenom", 1500 .level = 5, 1501 .vendor = CPUID_VENDOR_AMD, 1502 .family = 16, 1503 .model = 2, 1504 .stepping = 3, 1505 /* Missing: CPUID_HT */ 1506 .features[FEAT_1_EDX] = 1507 PPRO_FEATURES | 1508 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1509 CPUID_PSE36 | CPUID_VME, 1510 .features[FEAT_1_ECX] = 1511 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1512 CPUID_EXT_POPCNT, 1513 .features[FEAT_8000_0001_EDX] = 1514 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1515 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1516 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1517 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1518 CPUID_EXT3_CR8LEG, 1519 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1520 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1521 .features[FEAT_8000_0001_ECX] = 1522 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1523 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1524 /* Missing: CPUID_SVM_LBRV */ 1525 .features[FEAT_SVM] = 1526 CPUID_SVM_NPT, 1527 .xlevel = 0x8000001A, 1528 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1529 }, 1530 { 1531 .name = "core2duo", 1532 .level = 10, 1533 .vendor = 
CPUID_VENDOR_INTEL, 1534 .family = 6, 1535 .model = 15, 1536 .stepping = 11, 1537 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1538 .features[FEAT_1_EDX] = 1539 PPRO_FEATURES | 1540 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1541 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1542 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1543 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1544 .features[FEAT_1_ECX] = 1545 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1546 CPUID_EXT_CX16, 1547 .features[FEAT_8000_0001_EDX] = 1548 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1549 .features[FEAT_8000_0001_ECX] = 1550 CPUID_EXT3_LAHF_LM, 1551 .xlevel = 0x80000008, 1552 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1553 }, 1554 { 1555 .name = "kvm64", 1556 .level = 0xd, 1557 .vendor = CPUID_VENDOR_INTEL, 1558 .family = 15, 1559 .model = 6, 1560 .stepping = 1, 1561 /* Missing: CPUID_HT */ 1562 .features[FEAT_1_EDX] = 1563 PPRO_FEATURES | CPUID_VME | 1564 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1565 CPUID_PSE36, 1566 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1567 .features[FEAT_1_ECX] = 1568 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1569 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1570 .features[FEAT_8000_0001_EDX] = 1571 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1572 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1573 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1574 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1575 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1576 .features[FEAT_8000_0001_ECX] = 1577 0, 1578 .xlevel = 0x80000008, 1579 .model_id = "Common KVM processor" 1580 }, 1581 { 1582 .name = "qemu32", 1583 .level = 4, 1584 .vendor = CPUID_VENDOR_INTEL, 1585 .family = 6, 1586 .model = 6, 1587 .stepping = 3, 1588 .features[FEAT_1_EDX] = 1589 PPRO_FEATURES, 1590 .features[FEAT_1_ECX] = 1591 CPUID_EXT_SSE3, 1592 .xlevel = 0x80000004, 1593 
.model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1594 }, 1595 { 1596 .name = "kvm32", 1597 .level = 5, 1598 .vendor = CPUID_VENDOR_INTEL, 1599 .family = 15, 1600 .model = 6, 1601 .stepping = 1, 1602 .features[FEAT_1_EDX] = 1603 PPRO_FEATURES | CPUID_VME | 1604 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1605 .features[FEAT_1_ECX] = 1606 CPUID_EXT_SSE3, 1607 .features[FEAT_8000_0001_ECX] = 1608 0, 1609 .xlevel = 0x80000008, 1610 .model_id = "Common 32-bit KVM processor" 1611 }, 1612 { 1613 .name = "coreduo", 1614 .level = 10, 1615 .vendor = CPUID_VENDOR_INTEL, 1616 .family = 6, 1617 .model = 14, 1618 .stepping = 8, 1619 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1620 .features[FEAT_1_EDX] = 1621 PPRO_FEATURES | CPUID_VME | 1622 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1623 CPUID_SS, 1624 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1625 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1626 .features[FEAT_1_ECX] = 1627 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1628 .features[FEAT_8000_0001_EDX] = 1629 CPUID_EXT2_NX, 1630 .xlevel = 0x80000008, 1631 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1632 }, 1633 { 1634 .name = "486", 1635 .level = 1, 1636 .vendor = CPUID_VENDOR_INTEL, 1637 .family = 4, 1638 .model = 8, 1639 .stepping = 0, 1640 .features[FEAT_1_EDX] = 1641 I486_FEATURES, 1642 .xlevel = 0, 1643 .model_id = "", 1644 }, 1645 { 1646 .name = "pentium", 1647 .level = 1, 1648 .vendor = CPUID_VENDOR_INTEL, 1649 .family = 5, 1650 .model = 4, 1651 .stepping = 3, 1652 .features[FEAT_1_EDX] = 1653 PENTIUM_FEATURES, 1654 .xlevel = 0, 1655 .model_id = "", 1656 }, 1657 { 1658 .name = "pentium2", 1659 .level = 2, 1660 .vendor = CPUID_VENDOR_INTEL, 1661 .family = 6, 1662 .model = 5, 1663 .stepping = 2, 1664 .features[FEAT_1_EDX] = 1665 PENTIUM2_FEATURES, 1666 .xlevel = 0, 1667 .model_id = "", 1668 }, 1669 { 1670 .name = "pentium3", 1671 .level = 3, 1672 .vendor = CPUID_VENDOR_INTEL, 1673 .family = 6, 1674 .model = 7, 1675 .stepping 
= 3, 1676 .features[FEAT_1_EDX] = 1677 PENTIUM3_FEATURES, 1678 .xlevel = 0, 1679 .model_id = "", 1680 }, 1681 { 1682 .name = "athlon", 1683 .level = 2, 1684 .vendor = CPUID_VENDOR_AMD, 1685 .family = 6, 1686 .model = 2, 1687 .stepping = 3, 1688 .features[FEAT_1_EDX] = 1689 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1690 CPUID_MCA, 1691 .features[FEAT_8000_0001_EDX] = 1692 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1693 .xlevel = 0x80000008, 1694 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1695 }, 1696 { 1697 .name = "n270", 1698 .level = 10, 1699 .vendor = CPUID_VENDOR_INTEL, 1700 .family = 6, 1701 .model = 28, 1702 .stepping = 2, 1703 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1704 .features[FEAT_1_EDX] = 1705 PPRO_FEATURES | 1706 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1707 CPUID_ACPI | CPUID_SS, 1708 /* Some CPUs got no CPUID_SEP */ 1709 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1710 * CPUID_EXT_XTPR */ 1711 .features[FEAT_1_ECX] = 1712 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1713 CPUID_EXT_MOVBE, 1714 .features[FEAT_8000_0001_EDX] = 1715 CPUID_EXT2_NX, 1716 .features[FEAT_8000_0001_ECX] = 1717 CPUID_EXT3_LAHF_LM, 1718 .xlevel = 0x80000008, 1719 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1720 }, 1721 { 1722 .name = "Conroe", 1723 .level = 10, 1724 .vendor = CPUID_VENDOR_INTEL, 1725 .family = 6, 1726 .model = 15, 1727 .stepping = 3, 1728 .features[FEAT_1_EDX] = 1729 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1730 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1731 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1732 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1733 CPUID_DE | CPUID_FP87, 1734 .features[FEAT_1_ECX] = 1735 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1736 .features[FEAT_8000_0001_EDX] = 1737 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1738 .features[FEAT_8000_0001_ECX] = 1739 
CPUID_EXT3_LAHF_LM, 1740 .xlevel = 0x80000008, 1741 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1742 }, 1743 { 1744 .name = "Penryn", 1745 .level = 10, 1746 .vendor = CPUID_VENDOR_INTEL, 1747 .family = 6, 1748 .model = 23, 1749 .stepping = 3, 1750 .features[FEAT_1_EDX] = 1751 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1752 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1753 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1754 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1755 CPUID_DE | CPUID_FP87, 1756 .features[FEAT_1_ECX] = 1757 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1758 CPUID_EXT_SSE3, 1759 .features[FEAT_8000_0001_EDX] = 1760 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1761 .features[FEAT_8000_0001_ECX] = 1762 CPUID_EXT3_LAHF_LM, 1763 .xlevel = 0x80000008, 1764 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1765 }, 1766 { 1767 .name = "Nehalem", 1768 .level = 11, 1769 .vendor = CPUID_VENDOR_INTEL, 1770 .family = 6, 1771 .model = 26, 1772 .stepping = 3, 1773 .features[FEAT_1_EDX] = 1774 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1775 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1776 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1777 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1778 CPUID_DE | CPUID_FP87, 1779 .features[FEAT_1_ECX] = 1780 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1781 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1782 .features[FEAT_8000_0001_EDX] = 1783 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1784 .features[FEAT_8000_0001_ECX] = 1785 CPUID_EXT3_LAHF_LM, 1786 .xlevel = 0x80000008, 1787 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1788 }, 1789 { 1790 .name = "Nehalem-IBRS", 1791 .level = 11, 1792 .vendor = CPUID_VENDOR_INTEL, 1793 .family = 6, 1794 .model = 26, 1795 .stepping = 3, 1796 .features[FEAT_1_EDX] = 1797 
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1798 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1799 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1800 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1801 CPUID_DE | CPUID_FP87, 1802 .features[FEAT_1_ECX] = 1803 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1804 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1805 .features[FEAT_7_0_EDX] = 1806 CPUID_7_0_EDX_SPEC_CTRL, 1807 .features[FEAT_8000_0001_EDX] = 1808 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1809 .features[FEAT_8000_0001_ECX] = 1810 CPUID_EXT3_LAHF_LM, 1811 .xlevel = 0x80000008, 1812 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)", 1813 }, 1814 { 1815 .name = "Westmere", 1816 .level = 11, 1817 .vendor = CPUID_VENDOR_INTEL, 1818 .family = 6, 1819 .model = 44, 1820 .stepping = 1, 1821 .features[FEAT_1_EDX] = 1822 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1823 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1824 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1825 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1826 CPUID_DE | CPUID_FP87, 1827 .features[FEAT_1_ECX] = 1828 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1829 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1830 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1831 .features[FEAT_8000_0001_EDX] = 1832 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1833 .features[FEAT_8000_0001_ECX] = 1834 CPUID_EXT3_LAHF_LM, 1835 .features[FEAT_6_EAX] = 1836 CPUID_6_EAX_ARAT, 1837 .xlevel = 0x80000008, 1838 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1839 }, 1840 { 1841 .name = "Westmere-IBRS", 1842 .level = 11, 1843 .vendor = CPUID_VENDOR_INTEL, 1844 .family = 6, 1845 .model = 44, 1846 .stepping = 1, 1847 .features[FEAT_1_EDX] = 1848 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1849 CPUID_CLFLUSH | CPUID_PSE36 | 
CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1850 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1851 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1852 CPUID_DE | CPUID_FP87, 1853 .features[FEAT_1_ECX] = 1854 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1855 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1856 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1857 .features[FEAT_8000_0001_EDX] = 1858 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1859 .features[FEAT_8000_0001_ECX] = 1860 CPUID_EXT3_LAHF_LM, 1861 .features[FEAT_7_0_EDX] = 1862 CPUID_7_0_EDX_SPEC_CTRL, 1863 .features[FEAT_6_EAX] = 1864 CPUID_6_EAX_ARAT, 1865 .xlevel = 0x80000008, 1866 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)", 1867 }, 1868 { 1869 .name = "SandyBridge", 1870 .level = 0xd, 1871 .vendor = CPUID_VENDOR_INTEL, 1872 .family = 6, 1873 .model = 42, 1874 .stepping = 1, 1875 .features[FEAT_1_EDX] = 1876 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1877 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1878 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1879 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1880 CPUID_DE | CPUID_FP87, 1881 .features[FEAT_1_ECX] = 1882 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1883 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1884 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1885 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1886 CPUID_EXT_SSE3, 1887 .features[FEAT_8000_0001_EDX] = 1888 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1889 CPUID_EXT2_SYSCALL, 1890 .features[FEAT_8000_0001_ECX] = 1891 CPUID_EXT3_LAHF_LM, 1892 .features[FEAT_XSAVE] = 1893 CPUID_XSAVE_XSAVEOPT, 1894 .features[FEAT_6_EAX] = 1895 CPUID_6_EAX_ARAT, 1896 .xlevel = 0x80000008, 1897 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1898 }, 1899 { 1900 .name = "SandyBridge-IBRS", 1901 .level = 0xd, 1902 .vendor = CPUID_VENDOR_INTEL, 1903 .family = 6, 1904 
.model = 42, 1905 .stepping = 1, 1906 .features[FEAT_1_EDX] = 1907 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1908 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1909 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1910 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1911 CPUID_DE | CPUID_FP87, 1912 .features[FEAT_1_ECX] = 1913 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1914 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1915 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1916 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1917 CPUID_EXT_SSE3, 1918 .features[FEAT_8000_0001_EDX] = 1919 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1920 CPUID_EXT2_SYSCALL, 1921 .features[FEAT_8000_0001_ECX] = 1922 CPUID_EXT3_LAHF_LM, 1923 .features[FEAT_7_0_EDX] = 1924 CPUID_7_0_EDX_SPEC_CTRL, 1925 .features[FEAT_XSAVE] = 1926 CPUID_XSAVE_XSAVEOPT, 1927 .features[FEAT_6_EAX] = 1928 CPUID_6_EAX_ARAT, 1929 .xlevel = 0x80000008, 1930 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)", 1931 }, 1932 { 1933 .name = "IvyBridge", 1934 .level = 0xd, 1935 .vendor = CPUID_VENDOR_INTEL, 1936 .family = 6, 1937 .model = 58, 1938 .stepping = 9, 1939 .features[FEAT_1_EDX] = 1940 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1941 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1942 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1943 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1944 CPUID_DE | CPUID_FP87, 1945 .features[FEAT_1_ECX] = 1946 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1947 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1948 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1949 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1950 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1951 .features[FEAT_7_0_EBX] = 1952 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1953 CPUID_7_0_EBX_ERMS, 1954 
.features[FEAT_8000_0001_EDX] = 1955 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1956 CPUID_EXT2_SYSCALL, 1957 .features[FEAT_8000_0001_ECX] = 1958 CPUID_EXT3_LAHF_LM, 1959 .features[FEAT_XSAVE] = 1960 CPUID_XSAVE_XSAVEOPT, 1961 .features[FEAT_6_EAX] = 1962 CPUID_6_EAX_ARAT, 1963 .xlevel = 0x80000008, 1964 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1965 }, 1966 { 1967 .name = "IvyBridge-IBRS", 1968 .level = 0xd, 1969 .vendor = CPUID_VENDOR_INTEL, 1970 .family = 6, 1971 .model = 58, 1972 .stepping = 9, 1973 .features[FEAT_1_EDX] = 1974 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1975 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1976 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1977 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1978 CPUID_DE | CPUID_FP87, 1979 .features[FEAT_1_ECX] = 1980 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1981 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1982 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1983 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1984 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1985 .features[FEAT_7_0_EBX] = 1986 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1987 CPUID_7_0_EBX_ERMS, 1988 .features[FEAT_8000_0001_EDX] = 1989 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1990 CPUID_EXT2_SYSCALL, 1991 .features[FEAT_8000_0001_ECX] = 1992 CPUID_EXT3_LAHF_LM, 1993 .features[FEAT_7_0_EDX] = 1994 CPUID_7_0_EDX_SPEC_CTRL, 1995 .features[FEAT_XSAVE] = 1996 CPUID_XSAVE_XSAVEOPT, 1997 .features[FEAT_6_EAX] = 1998 CPUID_6_EAX_ARAT, 1999 .xlevel = 0x80000008, 2000 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)", 2001 }, 2002 { 2003 .name = "Haswell-noTSX", 2004 .level = 0xd, 2005 .vendor = CPUID_VENDOR_INTEL, 2006 .family = 6, 2007 .model = 60, 2008 .stepping = 1, 2009 .features[FEAT_1_EDX] = 2010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2011 CPUID_CLFLUSH | CPUID_PSE36 | 
CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2014 CPUID_DE | CPUID_FP87, 2015 .features[FEAT_1_ECX] = 2016 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2017 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2018 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2019 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2020 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2021 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2022 .features[FEAT_8000_0001_EDX] = 2023 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2024 CPUID_EXT2_SYSCALL, 2025 .features[FEAT_8000_0001_ECX] = 2026 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2027 .features[FEAT_7_0_EBX] = 2028 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2029 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2030 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 2031 .features[FEAT_XSAVE] = 2032 CPUID_XSAVE_XSAVEOPT, 2033 .features[FEAT_6_EAX] = 2034 CPUID_6_EAX_ARAT, 2035 .xlevel = 0x80000008, 2036 .model_id = "Intel Core Processor (Haswell, no TSX)", 2037 }, 2038 { 2039 .name = "Haswell-noTSX-IBRS", 2040 .level = 0xd, 2041 .vendor = CPUID_VENDOR_INTEL, 2042 .family = 6, 2043 .model = 60, 2044 .stepping = 1, 2045 .features[FEAT_1_EDX] = 2046 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2047 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2048 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2049 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2050 CPUID_DE | CPUID_FP87, 2051 .features[FEAT_1_ECX] = 2052 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2053 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2054 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2055 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2056 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2057 CPUID_EXT_PCID | 
CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2058 .features[FEAT_8000_0001_EDX] = 2059 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2060 CPUID_EXT2_SYSCALL, 2061 .features[FEAT_8000_0001_ECX] = 2062 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2063 .features[FEAT_7_0_EDX] = 2064 CPUID_7_0_EDX_SPEC_CTRL, 2065 .features[FEAT_7_0_EBX] = 2066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2067 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2068 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 2069 .features[FEAT_XSAVE] = 2070 CPUID_XSAVE_XSAVEOPT, 2071 .features[FEAT_6_EAX] = 2072 CPUID_6_EAX_ARAT, 2073 .xlevel = 0x80000008, 2074 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)", 2075 }, 2076 { 2077 .name = "Haswell", 2078 .level = 0xd, 2079 .vendor = CPUID_VENDOR_INTEL, 2080 .family = 6, 2081 .model = 60, 2082 .stepping = 4, 2083 .features[FEAT_1_EDX] = 2084 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2085 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2086 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2087 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2088 CPUID_DE | CPUID_FP87, 2089 .features[FEAT_1_ECX] = 2090 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2091 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2092 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2093 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2094 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2095 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2096 .features[FEAT_8000_0001_EDX] = 2097 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2098 CPUID_EXT2_SYSCALL, 2099 .features[FEAT_8000_0001_ECX] = 2100 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2101 .features[FEAT_7_0_EBX] = 2102 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2103 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2104 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2105 CPUID_7_0_EBX_RTM, 2106 
.features[FEAT_XSAVE] = 2107 CPUID_XSAVE_XSAVEOPT, 2108 .features[FEAT_6_EAX] = 2109 CPUID_6_EAX_ARAT, 2110 .xlevel = 0x80000008, 2111 .model_id = "Intel Core Processor (Haswell)", 2112 }, 2113 { 2114 .name = "Haswell-IBRS", 2115 .level = 0xd, 2116 .vendor = CPUID_VENDOR_INTEL, 2117 .family = 6, 2118 .model = 60, 2119 .stepping = 4, 2120 .features[FEAT_1_EDX] = 2121 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2122 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2123 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2124 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2125 CPUID_DE | CPUID_FP87, 2126 .features[FEAT_1_ECX] = 2127 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2128 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2129 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2130 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2131 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2132 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2133 .features[FEAT_8000_0001_EDX] = 2134 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2135 CPUID_EXT2_SYSCALL, 2136 .features[FEAT_8000_0001_ECX] = 2137 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2138 .features[FEAT_7_0_EDX] = 2139 CPUID_7_0_EDX_SPEC_CTRL, 2140 .features[FEAT_7_0_EBX] = 2141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2142 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2144 CPUID_7_0_EBX_RTM, 2145 .features[FEAT_XSAVE] = 2146 CPUID_XSAVE_XSAVEOPT, 2147 .features[FEAT_6_EAX] = 2148 CPUID_6_EAX_ARAT, 2149 .xlevel = 0x80000008, 2150 .model_id = "Intel Core Processor (Haswell, IBRS)", 2151 }, 2152 { 2153 .name = "Broadwell-noTSX", 2154 .level = 0xd, 2155 .vendor = CPUID_VENDOR_INTEL, 2156 .family = 6, 2157 .model = 61, 2158 .stepping = 2, 2159 .features[FEAT_1_EDX] = 2160 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2161 
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2162 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2163 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2164 CPUID_DE | CPUID_FP87, 2165 .features[FEAT_1_ECX] = 2166 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2167 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2168 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2169 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2170 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2171 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2172 .features[FEAT_8000_0001_EDX] = 2173 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2174 CPUID_EXT2_SYSCALL, 2175 .features[FEAT_8000_0001_ECX] = 2176 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2177 .features[FEAT_7_0_EBX] = 2178 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2179 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2180 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2181 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2182 CPUID_7_0_EBX_SMAP, 2183 .features[FEAT_XSAVE] = 2184 CPUID_XSAVE_XSAVEOPT, 2185 .features[FEAT_6_EAX] = 2186 CPUID_6_EAX_ARAT, 2187 .xlevel = 0x80000008, 2188 .model_id = "Intel Core Processor (Broadwell, no TSX)", 2189 }, 2190 { 2191 .name = "Broadwell-noTSX-IBRS", 2192 .level = 0xd, 2193 .vendor = CPUID_VENDOR_INTEL, 2194 .family = 6, 2195 .model = 61, 2196 .stepping = 2, 2197 .features[FEAT_1_EDX] = 2198 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2199 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2200 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2201 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2202 CPUID_DE | CPUID_FP87, 2203 .features[FEAT_1_ECX] = 2204 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2205 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2206 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2207 
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2208 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2209 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2210 .features[FEAT_8000_0001_EDX] = 2211 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2212 CPUID_EXT2_SYSCALL, 2213 .features[FEAT_8000_0001_ECX] = 2214 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2215 .features[FEAT_7_0_EDX] = 2216 CPUID_7_0_EDX_SPEC_CTRL, 2217 .features[FEAT_7_0_EBX] = 2218 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2219 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2220 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2221 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2222 CPUID_7_0_EBX_SMAP, 2223 .features[FEAT_XSAVE] = 2224 CPUID_XSAVE_XSAVEOPT, 2225 .features[FEAT_6_EAX] = 2226 CPUID_6_EAX_ARAT, 2227 .xlevel = 0x80000008, 2228 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)", 2229 }, 2230 { 2231 .name = "Broadwell", 2232 .level = 0xd, 2233 .vendor = CPUID_VENDOR_INTEL, 2234 .family = 6, 2235 .model = 61, 2236 .stepping = 2, 2237 .features[FEAT_1_EDX] = 2238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2242 CPUID_DE | CPUID_FP87, 2243 .features[FEAT_1_ECX] = 2244 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2245 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2246 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2247 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2248 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2249 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2250 .features[FEAT_8000_0001_EDX] = 2251 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2252 CPUID_EXT2_SYSCALL, 2253 .features[FEAT_8000_0001_ECX] = 2254 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | 
CPUID_EXT3_3DNOWPREFETCH, 2255 .features[FEAT_7_0_EBX] = 2256 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2257 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2258 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2259 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2260 CPUID_7_0_EBX_SMAP, 2261 .features[FEAT_XSAVE] = 2262 CPUID_XSAVE_XSAVEOPT, 2263 .features[FEAT_6_EAX] = 2264 CPUID_6_EAX_ARAT, 2265 .xlevel = 0x80000008, 2266 .model_id = "Intel Core Processor (Broadwell)", 2267 }, 2268 { 2269 .name = "Broadwell-IBRS", 2270 .level = 0xd, 2271 .vendor = CPUID_VENDOR_INTEL, 2272 .family = 6, 2273 .model = 61, 2274 .stepping = 2, 2275 .features[FEAT_1_EDX] = 2276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2280 CPUID_DE | CPUID_FP87, 2281 .features[FEAT_1_ECX] = 2282 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2283 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2284 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2285 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2286 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2287 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2288 .features[FEAT_8000_0001_EDX] = 2289 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2290 CPUID_EXT2_SYSCALL, 2291 .features[FEAT_8000_0001_ECX] = 2292 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2293 .features[FEAT_7_0_EDX] = 2294 CPUID_7_0_EDX_SPEC_CTRL, 2295 .features[FEAT_7_0_EBX] = 2296 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2297 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2298 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2299 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2300 CPUID_7_0_EBX_SMAP, 2301 
.features[FEAT_XSAVE] = 2302 CPUID_XSAVE_XSAVEOPT, 2303 .features[FEAT_6_EAX] = 2304 CPUID_6_EAX_ARAT, 2305 .xlevel = 0x80000008, 2306 .model_id = "Intel Core Processor (Broadwell, IBRS)", 2307 }, 2308 { 2309 .name = "Skylake-Client", 2310 .level = 0xd, 2311 .vendor = CPUID_VENDOR_INTEL, 2312 .family = 6, 2313 .model = 94, 2314 .stepping = 3, 2315 .features[FEAT_1_EDX] = 2316 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2317 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2318 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2319 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2320 CPUID_DE | CPUID_FP87, 2321 .features[FEAT_1_ECX] = 2322 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2323 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2324 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2325 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2326 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2327 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2328 .features[FEAT_8000_0001_EDX] = 2329 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2330 CPUID_EXT2_SYSCALL, 2331 .features[FEAT_8000_0001_ECX] = 2332 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2333 .features[FEAT_7_0_EBX] = 2334 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2335 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2336 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2337 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2338 CPUID_7_0_EBX_SMAP, 2339 /* Missing: XSAVES (not supported by some Linux versions, 2340 * including v4.1 to v4.12). 2341 * KVM doesn't yet expose any XSAVES state save component, 2342 * and the only one defined in Skylake (processor tracing) 2343 * probably will block migration anyway. 
2344 */ 2345 .features[FEAT_XSAVE] = 2346 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2347 CPUID_XSAVE_XGETBV1, 2348 .features[FEAT_6_EAX] = 2349 CPUID_6_EAX_ARAT, 2350 .xlevel = 0x80000008, 2351 .model_id = "Intel Core Processor (Skylake)", 2352 }, 2353 { 2354 .name = "Skylake-Client-IBRS", 2355 .level = 0xd, 2356 .vendor = CPUID_VENDOR_INTEL, 2357 .family = 6, 2358 .model = 94, 2359 .stepping = 3, 2360 .features[FEAT_1_EDX] = 2361 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2362 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2363 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2364 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2365 CPUID_DE | CPUID_FP87, 2366 .features[FEAT_1_ECX] = 2367 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2368 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2369 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2370 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2371 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2372 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2373 .features[FEAT_8000_0001_EDX] = 2374 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2375 CPUID_EXT2_SYSCALL, 2376 .features[FEAT_8000_0001_ECX] = 2377 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2378 .features[FEAT_7_0_EDX] = 2379 CPUID_7_0_EDX_SPEC_CTRL, 2380 .features[FEAT_7_0_EBX] = 2381 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2382 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2383 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2384 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2385 CPUID_7_0_EBX_SMAP, 2386 /* Missing: XSAVES (not supported by some Linux versions, 2387 * including v4.1 to v4.12). 2388 * KVM doesn't yet expose any XSAVES state save component, 2389 * and the only one defined in Skylake (processor tracing) 2390 * probably will block migration anyway. 
2391 */ 2392 .features[FEAT_XSAVE] = 2393 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2394 CPUID_XSAVE_XGETBV1, 2395 .features[FEAT_6_EAX] = 2396 CPUID_6_EAX_ARAT, 2397 .xlevel = 0x80000008, 2398 .model_id = "Intel Core Processor (Skylake, IBRS)", 2399 }, 2400 { 2401 .name = "Skylake-Server", 2402 .level = 0xd, 2403 .vendor = CPUID_VENDOR_INTEL, 2404 .family = 6, 2405 .model = 85, 2406 .stepping = 4, 2407 .features[FEAT_1_EDX] = 2408 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2409 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2410 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2411 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2412 CPUID_DE | CPUID_FP87, 2413 .features[FEAT_1_ECX] = 2414 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2415 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2416 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2417 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2418 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2419 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2420 .features[FEAT_8000_0001_EDX] = 2421 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2422 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2423 .features[FEAT_8000_0001_ECX] = 2424 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2425 .features[FEAT_7_0_EBX] = 2426 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2427 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2428 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2429 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2430 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2431 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2432 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2433 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2434 .features[FEAT_7_0_ECX] = 2435 CPUID_7_0_ECX_PKU, 2436 /* Missing: XSAVES (not supported by some Linux versions, 2437 * including v4.1 to 
v4.12). 2438 * KVM doesn't yet expose any XSAVES state save component, 2439 * and the only one defined in Skylake (processor tracing) 2440 * probably will block migration anyway. 2441 */ 2442 .features[FEAT_XSAVE] = 2443 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2444 CPUID_XSAVE_XGETBV1, 2445 .features[FEAT_6_EAX] = 2446 CPUID_6_EAX_ARAT, 2447 .xlevel = 0x80000008, 2448 .model_id = "Intel Xeon Processor (Skylake)", 2449 }, 2450 { 2451 .name = "Skylake-Server-IBRS", 2452 .level = 0xd, 2453 .vendor = CPUID_VENDOR_INTEL, 2454 .family = 6, 2455 .model = 85, 2456 .stepping = 4, 2457 .features[FEAT_1_EDX] = 2458 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2459 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2460 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2461 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2462 CPUID_DE | CPUID_FP87, 2463 .features[FEAT_1_ECX] = 2464 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2465 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2466 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2467 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2468 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2469 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2470 .features[FEAT_8000_0001_EDX] = 2471 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2472 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2473 .features[FEAT_8000_0001_ECX] = 2474 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2475 .features[FEAT_7_0_EDX] = 2476 CPUID_7_0_EDX_SPEC_CTRL, 2477 .features[FEAT_7_0_EBX] = 2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2479 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2480 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2481 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2482 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2483 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2484 
CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2485 CPUID_7_0_EBX_AVX512VL, 2486 .features[FEAT_7_0_ECX] = 2487 CPUID_7_0_ECX_PKU, 2488 /* Missing: XSAVES (not supported by some Linux versions, 2489 * including v4.1 to v4.12). 2490 * KVM doesn't yet expose any XSAVES state save component, 2491 * and the only one defined in Skylake (processor tracing) 2492 * probably will block migration anyway. 2493 */ 2494 .features[FEAT_XSAVE] = 2495 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2496 CPUID_XSAVE_XGETBV1, 2497 .features[FEAT_6_EAX] = 2498 CPUID_6_EAX_ARAT, 2499 .xlevel = 0x80000008, 2500 .model_id = "Intel Xeon Processor (Skylake, IBRS)", 2501 }, 2502 { 2503 .name = "Cascadelake-Server", 2504 .level = 0xd, 2505 .vendor = CPUID_VENDOR_INTEL, 2506 .family = 6, 2507 .model = 85, 2508 .stepping = 6, 2509 .features[FEAT_1_EDX] = 2510 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2511 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2512 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2513 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2514 CPUID_DE | CPUID_FP87, 2515 .features[FEAT_1_ECX] = 2516 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2517 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2518 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2519 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2520 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2521 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2522 .features[FEAT_8000_0001_EDX] = 2523 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2524 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2525 .features[FEAT_8000_0001_ECX] = 2526 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2527 .features[FEAT_7_0_EBX] = 2528 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2529 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2530 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2531 CPUID_7_0_EBX_RTM 
| CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2532 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2533 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2534 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2535 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2536 .features[FEAT_7_0_ECX] = 2537 CPUID_7_0_ECX_PKU | 2538 CPUID_7_0_ECX_AVX512VNNI, 2539 .features[FEAT_7_0_EDX] = 2540 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2541 /* Missing: XSAVES (not supported by some Linux versions, 2542 * including v4.1 to v4.12). 2543 * KVM doesn't yet expose any XSAVES state save component, 2544 * and the only one defined in Skylake (processor tracing) 2545 * probably will block migration anyway. 2546 */ 2547 .features[FEAT_XSAVE] = 2548 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2549 CPUID_XSAVE_XGETBV1, 2550 .features[FEAT_6_EAX] = 2551 CPUID_6_EAX_ARAT, 2552 .xlevel = 0x80000008, 2553 .model_id = "Intel Xeon Processor (Cascadelake)", 2554 }, 2555 { 2556 .name = "Icelake-Client", 2557 .level = 0xd, 2558 .vendor = CPUID_VENDOR_INTEL, 2559 .family = 6, 2560 .model = 126, 2561 .stepping = 0, 2562 .features[FEAT_1_EDX] = 2563 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2564 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2565 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2566 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2567 CPUID_DE | CPUID_FP87, 2568 .features[FEAT_1_ECX] = 2569 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2570 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2571 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2572 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2573 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2574 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2575 .features[FEAT_8000_0001_EDX] = 2576 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2577 CPUID_EXT2_SYSCALL, 2578 .features[FEAT_8000_0001_ECX] = 2579 CPUID_EXT3_ABM | 
CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2580 .features[FEAT_8000_0008_EBX] = 2581 CPUID_8000_0008_EBX_WBNOINVD, 2582 .features[FEAT_7_0_EBX] = 2583 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2584 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2585 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2586 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2587 CPUID_7_0_EBX_SMAP, 2588 .features[FEAT_7_0_ECX] = 2589 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2590 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2591 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2592 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2593 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2594 .features[FEAT_7_0_EDX] = 2595 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2596 /* Missing: XSAVES (not supported by some Linux versions, 2597 * including v4.1 to v4.12). 2598 * KVM doesn't yet expose any XSAVES state save component, 2599 * and the only one defined in Skylake (processor tracing) 2600 * probably will block migration anyway. 
2601 */ 2602 .features[FEAT_XSAVE] = 2603 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2604 CPUID_XSAVE_XGETBV1, 2605 .features[FEAT_6_EAX] = 2606 CPUID_6_EAX_ARAT, 2607 .xlevel = 0x80000008, 2608 .model_id = "Intel Core Processor (Icelake)", 2609 }, 2610 { 2611 .name = "Icelake-Server", 2612 .level = 0xd, 2613 .vendor = CPUID_VENDOR_INTEL, 2614 .family = 6, 2615 .model = 134, 2616 .stepping = 0, 2617 .features[FEAT_1_EDX] = 2618 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2619 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2620 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2621 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2622 CPUID_DE | CPUID_FP87, 2623 .features[FEAT_1_ECX] = 2624 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2625 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2626 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2627 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2628 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2629 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2630 .features[FEAT_8000_0001_EDX] = 2631 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2632 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2633 .features[FEAT_8000_0001_ECX] = 2634 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2635 .features[FEAT_8000_0008_EBX] = 2636 CPUID_8000_0008_EBX_WBNOINVD, 2637 .features[FEAT_7_0_EBX] = 2638 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2639 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2640 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2641 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2642 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2643 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2644 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2645 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2646 .features[FEAT_7_0_ECX] = 2647 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | 
CPUID_7_0_ECX_PKU | 2648 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2649 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2650 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2651 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 2652 .features[FEAT_7_0_EDX] = 2653 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2654 /* Missing: XSAVES (not supported by some Linux versions, 2655 * including v4.1 to v4.12). 2656 * KVM doesn't yet expose any XSAVES state save component, 2657 * and the only one defined in Skylake (processor tracing) 2658 * probably will block migration anyway. 2659 */ 2660 .features[FEAT_XSAVE] = 2661 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2662 CPUID_XSAVE_XGETBV1, 2663 .features[FEAT_6_EAX] = 2664 CPUID_6_EAX_ARAT, 2665 .xlevel = 0x80000008, 2666 .model_id = "Intel Xeon Processor (Icelake)", 2667 }, 2668 { 2669 .name = "KnightsMill", 2670 .level = 0xd, 2671 .vendor = CPUID_VENDOR_INTEL, 2672 .family = 6, 2673 .model = 133, 2674 .stepping = 0, 2675 .features[FEAT_1_EDX] = 2676 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2677 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2678 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2679 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2680 CPUID_PSE | CPUID_DE | CPUID_FP87, 2681 .features[FEAT_1_ECX] = 2682 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2683 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2684 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2685 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2686 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2687 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2688 .features[FEAT_8000_0001_EDX] = 2689 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2690 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2691 .features[FEAT_8000_0001_ECX] = 2692 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2693 .features[FEAT_7_0_EBX] = 2694 CPUID_7_0_EBX_FSGSBASE | 
CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2695 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2696 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2697 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2698 CPUID_7_0_EBX_AVX512ER, 2699 .features[FEAT_7_0_ECX] = 2700 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2701 .features[FEAT_7_0_EDX] = 2702 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2703 .features[FEAT_XSAVE] = 2704 CPUID_XSAVE_XSAVEOPT, 2705 .features[FEAT_6_EAX] = 2706 CPUID_6_EAX_ARAT, 2707 .xlevel = 0x80000008, 2708 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2709 }, 2710 { 2711 .name = "Opteron_G1", 2712 .level = 5, 2713 .vendor = CPUID_VENDOR_AMD, 2714 .family = 15, 2715 .model = 6, 2716 .stepping = 1, 2717 .features[FEAT_1_EDX] = 2718 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2719 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2720 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2721 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2722 CPUID_DE | CPUID_FP87, 2723 .features[FEAT_1_ECX] = 2724 CPUID_EXT_SSE3, 2725 .features[FEAT_8000_0001_EDX] = 2726 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2727 .xlevel = 0x80000008, 2728 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2729 }, 2730 { 2731 .name = "Opteron_G2", 2732 .level = 5, 2733 .vendor = CPUID_VENDOR_AMD, 2734 .family = 15, 2735 .model = 6, 2736 .stepping = 1, 2737 .features[FEAT_1_EDX] = 2738 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2739 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2740 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2741 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2742 CPUID_DE | CPUID_FP87, 2743 .features[FEAT_1_ECX] = 2744 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2745 .features[FEAT_8000_0001_EDX] = 2746 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2747 
.features[FEAT_8000_0001_ECX] = 2748 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2749 .xlevel = 0x80000008, 2750 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2751 }, 2752 { 2753 .name = "Opteron_G3", 2754 .level = 5, 2755 .vendor = CPUID_VENDOR_AMD, 2756 .family = 16, 2757 .model = 2, 2758 .stepping = 3, 2759 .features[FEAT_1_EDX] = 2760 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2761 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2762 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2763 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2764 CPUID_DE | CPUID_FP87, 2765 .features[FEAT_1_ECX] = 2766 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2767 CPUID_EXT_SSE3, 2768 .features[FEAT_8000_0001_EDX] = 2769 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 2770 CPUID_EXT2_RDTSCP, 2771 .features[FEAT_8000_0001_ECX] = 2772 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2773 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2774 .xlevel = 0x80000008, 2775 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2776 }, 2777 { 2778 .name = "Opteron_G4", 2779 .level = 0xd, 2780 .vendor = CPUID_VENDOR_AMD, 2781 .family = 21, 2782 .model = 1, 2783 .stepping = 2, 2784 .features[FEAT_1_EDX] = 2785 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2786 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2787 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2788 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2789 CPUID_DE | CPUID_FP87, 2790 .features[FEAT_1_ECX] = 2791 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2792 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2793 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2794 CPUID_EXT_SSE3, 2795 .features[FEAT_8000_0001_EDX] = 2796 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2797 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2798 .features[FEAT_8000_0001_ECX] = 2799 CPUID_EXT3_FMA4 
| CPUID_EXT3_XOP | 2800 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2801 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2802 CPUID_EXT3_LAHF_LM, 2803 .features[FEAT_SVM] = 2804 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2805 /* no xsaveopt! */ 2806 .xlevel = 0x8000001A, 2807 .model_id = "AMD Opteron 62xx class CPU", 2808 }, 2809 { 2810 .name = "Opteron_G5", 2811 .level = 0xd, 2812 .vendor = CPUID_VENDOR_AMD, 2813 .family = 21, 2814 .model = 2, 2815 .stepping = 0, 2816 .features[FEAT_1_EDX] = 2817 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2818 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2819 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2820 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2821 CPUID_DE | CPUID_FP87, 2822 .features[FEAT_1_ECX] = 2823 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2824 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2825 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2826 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2827 .features[FEAT_8000_0001_EDX] = 2828 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2829 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2830 .features[FEAT_8000_0001_ECX] = 2831 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2832 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2833 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2834 CPUID_EXT3_LAHF_LM, 2835 .features[FEAT_SVM] = 2836 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2837 /* no xsaveopt! 
*/ 2838 .xlevel = 0x8000001A, 2839 .model_id = "AMD Opteron 63xx class CPU", 2840 }, 2841 { 2842 .name = "EPYC", 2843 .level = 0xd, 2844 .vendor = CPUID_VENDOR_AMD, 2845 .family = 23, 2846 .model = 1, 2847 .stepping = 2, 2848 .features[FEAT_1_EDX] = 2849 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2850 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2851 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2852 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2853 CPUID_VME | CPUID_FP87, 2854 .features[FEAT_1_ECX] = 2855 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2856 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2857 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2858 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2859 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2860 .features[FEAT_8000_0001_EDX] = 2861 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2862 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2863 CPUID_EXT2_SYSCALL, 2864 .features[FEAT_8000_0001_ECX] = 2865 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2866 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2867 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2868 CPUID_EXT3_TOPOEXT, 2869 .features[FEAT_7_0_EBX] = 2870 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2871 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2872 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2873 CPUID_7_0_EBX_SHA_NI, 2874 /* Missing: XSAVES (not supported by some Linux versions, 2875 * including v4.1 to v4.12). 2876 * KVM doesn't yet expose any XSAVES state save component. 
2877 */ 2878 .features[FEAT_XSAVE] = 2879 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2880 CPUID_XSAVE_XGETBV1, 2881 .features[FEAT_6_EAX] = 2882 CPUID_6_EAX_ARAT, 2883 .features[FEAT_SVM] = 2884 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2885 .xlevel = 0x8000001E, 2886 .model_id = "AMD EPYC Processor", 2887 .cache_info = &epyc_cache_info, 2888 }, 2889 { 2890 .name = "EPYC-IBPB", 2891 .level = 0xd, 2892 .vendor = CPUID_VENDOR_AMD, 2893 .family = 23, 2894 .model = 1, 2895 .stepping = 2, 2896 .features[FEAT_1_EDX] = 2897 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2898 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2899 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2900 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2901 CPUID_VME | CPUID_FP87, 2902 .features[FEAT_1_ECX] = 2903 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2904 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2905 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2906 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2907 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2908 .features[FEAT_8000_0001_EDX] = 2909 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2910 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2911 CPUID_EXT2_SYSCALL, 2912 .features[FEAT_8000_0001_ECX] = 2913 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2914 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2915 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2916 CPUID_EXT3_TOPOEXT, 2917 .features[FEAT_8000_0008_EBX] = 2918 CPUID_8000_0008_EBX_IBPB, 2919 .features[FEAT_7_0_EBX] = 2920 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2921 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2922 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2923 CPUID_7_0_EBX_SHA_NI, 2924 /* Missing: XSAVES (not supported by some Linux versions, 2925 * including v4.1 to v4.12). 
2926 * KVM doesn't yet expose any XSAVES state save component. 2927 */ 2928 .features[FEAT_XSAVE] = 2929 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2930 CPUID_XSAVE_XGETBV1, 2931 .features[FEAT_6_EAX] = 2932 CPUID_6_EAX_ARAT, 2933 .features[FEAT_SVM] = 2934 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2935 .xlevel = 0x8000001E, 2936 .model_id = "AMD EPYC Processor (with IBPB)", 2937 .cache_info = &epyc_cache_info, 2938 }, 2939 { 2940 .name = "Dhyana", 2941 .level = 0xd, 2942 .vendor = CPUID_VENDOR_HYGON, 2943 .family = 24, 2944 .model = 0, 2945 .stepping = 1, 2946 .features[FEAT_1_EDX] = 2947 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2948 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2949 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2950 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2951 CPUID_VME | CPUID_FP87, 2952 .features[FEAT_1_ECX] = 2953 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2954 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 2955 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2956 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2957 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 2958 .features[FEAT_8000_0001_EDX] = 2959 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2960 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2961 CPUID_EXT2_SYSCALL, 2962 .features[FEAT_8000_0001_ECX] = 2963 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2964 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2965 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2966 CPUID_EXT3_TOPOEXT, 2967 .features[FEAT_8000_0008_EBX] = 2968 CPUID_8000_0008_EBX_IBPB, 2969 .features[FEAT_7_0_EBX] = 2970 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2971 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2972 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 2973 /* 2974 * Missing: XSAVES (not supported by some Linux versions, 2975 * including v4.1 
to v4.12). 2976 * KVM doesn't yet expose any XSAVES state save component. 2977 */ 2978 .features[FEAT_XSAVE] = 2979 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2980 CPUID_XSAVE_XGETBV1, 2981 .features[FEAT_6_EAX] = 2982 CPUID_6_EAX_ARAT, 2983 .features[FEAT_SVM] = 2984 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2985 .xlevel = 0x8000001E, 2986 .model_id = "Hygon Dhyana Processor", 2987 .cache_info = &epyc_cache_info, 2988 }, 2989 }; 2990 2991 typedef struct PropValue { 2992 const char *prop, *value; 2993 } PropValue; 2994 2995 /* KVM-specific features that are automatically added/removed 2996 * from all CPU models when KVM is enabled. 2997 */ 2998 static PropValue kvm_default_props[] = { 2999 { "kvmclock", "on" }, 3000 { "kvm-nopiodelay", "on" }, 3001 { "kvm-asyncpf", "on" }, 3002 { "kvm-steal-time", "on" }, 3003 { "kvm-pv-eoi", "on" }, 3004 { "kvmclock-stable-bit", "on" }, 3005 { "x2apic", "on" }, 3006 { "acpi", "off" }, 3007 { "monitor", "off" }, 3008 { "svm", "off" }, 3009 { NULL, NULL }, 3010 }; 3011 3012 /* TCG-specific defaults that override all CPU models when using TCG 3013 */ 3014 static PropValue tcg_default_props[] = { 3015 { "vme", "off" }, 3016 { NULL, NULL }, 3017 }; 3018 3019 3020 void x86_cpu_change_kvm_default(const char *prop, const char *value) 3021 { 3022 PropValue *pv; 3023 for (pv = kvm_default_props; pv->prop; pv++) { 3024 if (!strcmp(pv->prop, prop)) { 3025 pv->value = value; 3026 break; 3027 } 3028 } 3029 3030 /* It is valid to call this function only for properties that 3031 * are already present in the kvm_default_props table. 
 */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/*
 * Report whether LMCE (local machine-check exception) can be offered to
 * the guest.  Queries KVM's supported MCE capabilities; without
 * CONFIG_KVM, mce_cap stays 0 and this always returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    /* CPUID leaves 0x80000002..0x80000004 each yield 16 bytes of the
     * 48-byte brand string in EAX/EBX/ECX/EDX. */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init for the "max" CPU model; ordering = 9 places it after the
 * named models in sorted CPU listings. */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}

static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);

/*
 * Instance init for the "max" CPU model.  When the accelerator uses host
 * CPUID (accel_uses_host_cpuid()), mirror the host's vendor, family,
 * model, stepping, model-id and minimum CPUID levels; the non-KVM branch
 * assumes HVF (it calls hvf_get_supported_cpuid()).  Otherwise fall back
 * to a fixed TCG identity (AMD vendor, family 6/model 6/stepping 3).
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        /* NOTE(review): host_cpudef is filled below but not otherwise
         * used in this function — looks vestigial; confirm before
         * removing. */
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* "host" is a thin subclass of "max" that additionally requires host
 * CPUID passthrough (host_cpuid_required). */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

/*
 * Build a human-readable location string for a feature word:
 * "CPUID.xxH:REG" for CPUID-backed words, "MSR(xxH)" for MSR-backed
 * ones.  Returns a newly allocated string; the caller must g_free() it.
 * NOTE(review): the 'bit' parameter is unused here.
 */
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    return NULL;
}

/* Emit one warning per bit set in @mask: the requested feature in word
 * @w is not supported by the current accelerator ("host" when host
 * CPUID is in use, "TCG" otherwise). */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;
    char *feat_word_str;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            feat_word_str = feature_word_description(f, i);
            warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        feat_word_str,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
            g_free(feat_word_str);
        }
    }
}

/*
 * QOM getter for "family": base family is bits [11:8] of cpuid_version;
 * when that field is 0xf, the extended family (bits [27:20]) is added.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

/*
 * QOM setter for "family": accepts 0..0xff + 0xf.  Values above 0x0f
 * are encoded as base family 0xf plus extended family (value - 0xf) in
 * bits [27:20].
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear base family [11:8] and extended family [27:20] fields. */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

/*
 * QOM getter for "model": base model in bits [7:4], extended model
 * (bits [19:16]) forms the high nibble of the returned value.
 */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "model": accepts 0..0xff, split across the base model
 * field [7:4] and the extended model field [19:16]. */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

/* QOM getter for "stepping": bits [3:0] of cpuid_version. */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "stepping": accepts 0..0xf, stored in bits [3:0]. */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}

/* QOM getter for "vendor": the 12-character vendor string unpacked from
 * the three CPUID vendor registers.  Caller frees the result. */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

/*
 * QOM setter for "vendor": requires exactly CPUID_VENDOR_SZ (12)
 * characters, packed 4 bytes per register, least-significant byte first.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}

/* QOM getter for "model-id": unpack the 48-byte brand string from the
 * cpuid_model word array.  Caller frees the result. */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

/* QOM setter for "model-id": pack up to 48 characters (NUL-padded) into
 * the cpuid_model word array; NULL is treated as the empty string. */
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

/* QOM getter for "tsc-frequency": reports env.tsc_khz converted to Hz. */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/*
 * QOM setter for "tsc-frequency": accepts a frequency in Hz in
 * [0, INT64_MAX] and stores it (divided down to kHz) in both tsc_khz
 * and user_tsc_khz.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties:
 * @opaque points at the uint32_t feature word array to expose. */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Backing storage for the list nodes lives on the stack; the visitor
     * consumes the list before this function returns. */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* QOM getter for "hv-spinlocks": the Hyper-V spinlock retry count. */
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "hv-spinlocks": accepts values in [0xFFF, UINT_MAX].
 * The 0xFFF minimum matches the smallest retry count the code accepts
 * here — presumably mandated by the Hyper-V spec; confirm there. */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

/* PropertyInfo glue binding the hv-spinlocks getter/setter to qdev. */
static const PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper over g_strcmp0 for g_list_find_custom(). */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each parsed option is registered as a global property on @typename.
 * Runs only once per process (guarded by cpu_globals_initialized).
 * NOTE(review): mutates @features in place via strtok(), which is not
 * reentrant — acceptable only because this runs once during startup.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts size suffixes and maps to the
         * "tsc-frequency" property. */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
                                       strList **feat_names)
{
    FeatureWord w;
    strList **next = feat_names;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }
}

/* QOM getter for "unavailable-features": the names of all feature bits
 * that were filtered out of this CPU. */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}

/* Check for missing features that may prevent the CPU class from
 * running using the current machine
and accelerator. 3708 */ 3709 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 3710 strList **missing_feats) 3711 { 3712 X86CPU *xc; 3713 Error *err = NULL; 3714 strList **next = missing_feats; 3715 3716 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 3717 strList *new = g_new0(strList, 1); 3718 new->value = g_strdup("kvm"); 3719 *missing_feats = new; 3720 return; 3721 } 3722 3723 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3724 3725 x86_cpu_expand_features(xc, &err); 3726 if (err) { 3727 /* Errors at x86_cpu_expand_features should never happen, 3728 * but in case it does, just report the model as not 3729 * runnable at all using the "type" property. 3730 */ 3731 strList *new = g_new0(strList, 1); 3732 new->value = g_strdup("type"); 3733 *next = new; 3734 next = &new->next; 3735 } 3736 3737 x86_cpu_filter_features(xc); 3738 3739 x86_cpu_list_feature_names(xc->filtered_features, next); 3740 3741 object_unref(OBJECT(xc)); 3742 } 3743 3744 /* Print all cpuid feature names in featureset 3745 */ 3746 static void listflags(GList *features) 3747 { 3748 size_t len = 0; 3749 GList *tmp; 3750 3751 for (tmp = features; tmp; tmp = tmp->next) { 3752 const char *name = tmp->data; 3753 if ((len + strlen(name) + 1) >= 75) { 3754 qemu_printf("\n"); 3755 len = 0; 3756 } 3757 qemu_printf("%s%s", len == 0 ? " " : " ", name); 3758 len += strlen(name) + 1; 3759 } 3760 qemu_printf("\n"); 3761 } 3762 3763 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ 3764 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 3765 { 3766 ObjectClass *class_a = (ObjectClass *)a; 3767 ObjectClass *class_b = (ObjectClass *)b; 3768 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 3769 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 3770 char *name_a, *name_b; 3771 int ret; 3772 3773 if (cc_a->ordering != cc_b->ordering) { 3774 ret = cc_a->ordering - cc_b->ordering; 3775 } else { 3776 name_a = x86_cpu_class_get_model_name(cc_a); 3777 name_b = x86_cpu_class_get_model_name(cc_b); 3778 ret = strcmp(name_a, name_b); 3779 g_free(name_a); 3780 g_free(name_b); 3781 } 3782 return ret; 3783 } 3784 3785 static GSList *get_sorted_cpu_model_list(void) 3786 { 3787 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 3788 list = g_slist_sort(list, x86_cpu_list_compare); 3789 return list; 3790 } 3791 3792 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 3793 { 3794 ObjectClass *oc = data; 3795 X86CPUClass *cc = X86_CPU_CLASS(oc); 3796 char *name = x86_cpu_class_get_model_name(cc); 3797 const char *desc = cc->model_description; 3798 if (!desc && cc->cpu_def) { 3799 desc = cc->cpu_def->model_id; 3800 } 3801 3802 qemu_printf("x86 %-20s %-48s\n", name, desc); 3803 g_free(name); 3804 } 3805 3806 /* list available CPU models and flags */ 3807 void x86_cpu_list(void) 3808 { 3809 int i, j; 3810 GSList *list; 3811 GList *names = NULL; 3812 3813 qemu_printf("Available CPUs:\n"); 3814 list = get_sorted_cpu_model_list(); 3815 g_slist_foreach(list, x86_cpu_list_entry, NULL); 3816 g_slist_free(list); 3817 3818 names = NULL; 3819 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 3820 FeatureWordInfo *fw = &feature_word_info[i]; 3821 for (j = 0; j < 32; j++) { 3822 if (fw->feat_names[j]) { 3823 names = g_list_append(names, (gpointer)fw->feat_names[j]); 3824 } 3825 } 3826 } 3827 3828 names = g_list_sort(names, (GCompareFunc)strcmp); 3829 3830 qemu_printf("\nRecognized CPUID flags:\n"); 3831 listflags(names); 3832 qemu_printf("\n"); 
3833 g_list_free(names); 3834 } 3835 3836 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 3837 { 3838 ObjectClass *oc = data; 3839 X86CPUClass *cc = X86_CPU_CLASS(oc); 3840 CpuDefinitionInfoList **cpu_list = user_data; 3841 CpuDefinitionInfoList *entry; 3842 CpuDefinitionInfo *info; 3843 3844 info = g_malloc0(sizeof(*info)); 3845 info->name = x86_cpu_class_get_model_name(cc); 3846 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 3847 info->has_unavailable_features = true; 3848 info->q_typename = g_strdup(object_class_get_name(oc)); 3849 info->migration_safe = cc->migration_safe; 3850 info->has_migration_safe = true; 3851 info->q_static = cc->static_model; 3852 3853 entry = g_malloc0(sizeof(*entry)); 3854 entry->value = info; 3855 entry->next = *cpu_list; 3856 *cpu_list = entry; 3857 } 3858 3859 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 3860 { 3861 CpuDefinitionInfoList *cpu_list = NULL; 3862 GSList *list = get_sorted_cpu_model_list(); 3863 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 3864 g_slist_free(list); 3865 return cpu_list; 3866 } 3867 3868 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 3869 bool migratable_only) 3870 { 3871 FeatureWordInfo *wi = &feature_word_info[w]; 3872 uint32_t r = 0; 3873 3874 if (kvm_enabled()) { 3875 switch (wi->type) { 3876 case CPUID_FEATURE_WORD: 3877 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 3878 wi->cpuid.ecx, 3879 wi->cpuid.reg); 3880 break; 3881 case MSR_FEATURE_WORD: 3882 r = kvm_arch_get_supported_msr_feature(kvm_state, 3883 wi->msr.index); 3884 break; 3885 } 3886 } else if (hvf_enabled()) { 3887 if (wi->type != CPUID_FEATURE_WORD) { 3888 return 0; 3889 } 3890 r = hvf_get_supported_cpuid(wi->cpuid.eax, 3891 wi->cpuid.ecx, 3892 wi->cpuid.reg); 3893 } else if (tcg_enabled()) { 3894 r = wi->tcg_features; 3895 } else { 3896 return ~0; 3897 } 3898 if (migratable_only) { 3899 r &= x86_cpu_get_migratable_flags(w); 
3900 } 3901 return r; 3902 } 3903 3904 static void x86_cpu_report_filtered_features(X86CPU *cpu) 3905 { 3906 FeatureWord w; 3907 3908 for (w = 0; w < FEATURE_WORDS; w++) { 3909 report_unavailable_features(w, cpu->filtered_features[w]); 3910 } 3911 } 3912 3913 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 3914 { 3915 PropValue *pv; 3916 for (pv = props; pv->prop; pv++) { 3917 if (!pv->value) { 3918 continue; 3919 } 3920 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 3921 &error_abort); 3922 } 3923 } 3924 3925 /* Load data from X86CPUDefinition into a X86CPU object 3926 */ 3927 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp) 3928 { 3929 CPUX86State *env = &cpu->env; 3930 const char *vendor; 3931 char host_vendor[CPUID_VENDOR_SZ + 1]; 3932 FeatureWord w; 3933 3934 /*NOTE: any property set by this function should be returned by 3935 * x86_cpu_static_props(), so static expansion of 3936 * query-cpu-model-expansion is always complete. 3937 */ 3938 3939 /* CPU models only set _minimum_ values for level/xlevel: */ 3940 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp); 3941 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp); 3942 3943 object_property_set_int(OBJECT(cpu), def->family, "family", errp); 3944 object_property_set_int(OBJECT(cpu), def->model, "model", errp); 3945 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); 3946 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); 3947 for (w = 0; w < FEATURE_WORDS; w++) { 3948 env->features[w] = def->features[w]; 3949 } 3950 3951 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 3952 cpu->legacy_cache = !def->cache_info; 3953 3954 /* Special cases not set in the X86CPUDefinition structs: */ 3955 /* TODO: in-kernel irqchip for hvf */ 3956 if (kvm_enabled()) { 3957 if (!kvm_irqchip_in_kernel()) { 3958 x86_cpu_change_kvm_default("x2apic", "off"); 3959 } 3960 3961 
x86_cpu_apply_props(cpu, kvm_default_props); 3962 } else if (tcg_enabled()) { 3963 x86_cpu_apply_props(cpu, tcg_default_props); 3964 } 3965 3966 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 3967 3968 /* sysenter isn't supported in compatibility mode on AMD, 3969 * syscall isn't supported in compatibility mode on Intel. 3970 * Normally we advertise the actual CPU vendor, but you can 3971 * override this using the 'vendor' property if you want to use 3972 * KVM's sysenter/syscall emulation in compatibility mode and 3973 * when doing cross vendor migration 3974 */ 3975 vendor = def->vendor; 3976 if (accel_uses_host_cpuid()) { 3977 uint32_t ebx = 0, ecx = 0, edx = 0; 3978 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 3979 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 3980 vendor = host_vendor; 3981 } 3982 3983 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp); 3984 3985 } 3986 3987 #ifndef CONFIG_USER_ONLY 3988 /* Return a QDict containing keys for all properties that can be included 3989 * in static expansion of CPU models. All properties set by x86_cpu_load_def() 3990 * must be included in the dictionary. 3991 */ 3992 static QDict *x86_cpu_static_props(void) 3993 { 3994 FeatureWord w; 3995 int i; 3996 static const char *props[] = { 3997 "min-level", 3998 "min-xlevel", 3999 "family", 4000 "model", 4001 "stepping", 4002 "model-id", 4003 "vendor", 4004 "lmce", 4005 NULL, 4006 }; 4007 static QDict *d; 4008 4009 if (d) { 4010 return d; 4011 } 4012 4013 d = qdict_new(); 4014 for (i = 0; props[i]; i++) { 4015 qdict_put_null(d, props[i]); 4016 } 4017 4018 for (w = 0; w < FEATURE_WORDS; w++) { 4019 FeatureWordInfo *fi = &feature_word_info[w]; 4020 int bit; 4021 for (bit = 0; bit < 32; bit++) { 4022 if (!fi->feat_names[bit]) { 4023 continue; 4024 } 4025 qdict_put_null(d, fi->feat_names[bit]); 4026 } 4027 } 4028 4029 return d; 4030 } 4031 4032 /* Add an entry to @props dict, with the value for property. 
 */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}

/* Set each QDict entry as a QOM property on @obj; stops at the first
 * property that fails to apply and propagates its error. */
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                    qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}

/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* object_unref(NULL) is a no-op, so this is safe when
         * cpu_class_by_name() failed above */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}

/* QMP query-cpu-model-expansion: expand @model (static or full) into an
 * explicit property dictionary. */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                                                      CpuModelInfo *model,
                                                      Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): for the STATIC case this re-puts the same keys already
     * added above (qdict_put_obj replaces existing entries), so the call in
     * the STATIC branch looks redundant -- confirm intent before changing. */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
#endif /* !CONFIG_USER_ONLY */

/* gdbstub hook: architecture name reported to GDB */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}

/* class_init for the per-model CPU types registered below; @data is the
 * X86CPUDefinition for this model. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    xcc->migration_safe = true;
}

/* Register one QOM type for a built-in CPU model definition */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);


    type_register(&ti);
    g_free(typename);
}

#if !defined(CONFIG_USER_ONLY)

/* Strip the APIC feature bit (used when the board has no APIC) */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/* Emulate the CPUID instruction for leaf @index / sub-leaf @count,
 * filling in the four output registers. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analog of Intel leaf 4) */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: EAX bit 1 = SEV supported; EBX encodes the C-bit
         * position and the number of physical address bits SEV removes */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields survive reset (e.g. configuration) */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* True if this CPU's local APIC has the BSP flag set */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Initialize MCE banks/capabilities if family >= 6 and MCE+MCA are set */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation matching the active accelerator */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create the local APIC child device for @cpu (not yet realized) */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize the APIC and map its MMIO region (mapped once per machine) */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}

/* machine-done notifier: alias the machine's SMRAM region into this CPU's
 * address space, if the machine provides one. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* user-mode has no APIC; keep the realize hook as a no-op */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to @value if it is currently lower */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    uint32_t region = eax & 0xF0000000;

    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
    break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
    break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
    break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
5045 */ 5046 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 5047 { 5048 CPUX86State *env = &cpu->env; 5049 FeatureWord w; 5050 GList *l; 5051 Error *local_err = NULL; 5052 5053 /*TODO: Now cpu->max_features doesn't overwrite features 5054 * set using QOM properties, and we can convert 5055 * plus_features & minus_features to global properties 5056 * inside x86_cpu_parse_featurestr() too. 5057 */ 5058 if (cpu->max_features) { 5059 for (w = 0; w < FEATURE_WORDS; w++) { 5060 /* Override only features that weren't set explicitly 5061 * by the user. 5062 */ 5063 env->features[w] |= 5064 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 5065 ~env->user_features[w] & \ 5066 ~feature_word_info[w].no_autoenable_flags; 5067 } 5068 } 5069 5070 for (l = plus_features; l; l = l->next) { 5071 const char *prop = l->data; 5072 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 5073 if (local_err) { 5074 goto out; 5075 } 5076 } 5077 5078 for (l = minus_features; l; l = l->next) { 5079 const char *prop = l->data; 5080 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 5081 if (local_err) { 5082 goto out; 5083 } 5084 } 5085 5086 if (!kvm_enabled() || !cpu->expose_kvm) { 5087 env->features[FEAT_KVM] = 0; 5088 } 5089 5090 x86_cpu_enable_xsave_components(cpu); 5091 5092 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 5093 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 5094 if (cpu->full_cpuid_auto_level) { 5095 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 5096 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 5097 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 5098 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 5099 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 5100 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 5101 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 5102 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 5103 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 5104 x86_cpu_adjust_feat_level(cpu, 
FEAT_SVM); 5105 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 5106 5107 /* Intel Processor Trace requires CPUID[0x14] */ 5108 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 5109 kvm_enabled() && cpu->intel_pt_auto_level) { 5110 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 5111 } 5112 5113 /* SVM requires CPUID[0x8000000A] */ 5114 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5115 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 5116 } 5117 5118 /* SEV requires CPUID[0x8000001F] */ 5119 if (sev_enabled()) { 5120 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 5121 } 5122 } 5123 5124 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 5125 if (env->cpuid_level == UINT32_MAX) { 5126 env->cpuid_level = env->cpuid_min_level; 5127 } 5128 if (env->cpuid_xlevel == UINT32_MAX) { 5129 env->cpuid_xlevel = env->cpuid_min_xlevel; 5130 } 5131 if (env->cpuid_xlevel2 == UINT32_MAX) { 5132 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 5133 } 5134 5135 out: 5136 if (local_err != NULL) { 5137 error_propagate(errp, local_err); 5138 } 5139 } 5140 5141 /* 5142 * Finishes initialization of CPUID data, filters CPU feature 5143 * words based on host availability of each feature. 5144 * 5145 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 
5146 */ 5147 static int x86_cpu_filter_features(X86CPU *cpu) 5148 { 5149 CPUX86State *env = &cpu->env; 5150 FeatureWord w; 5151 int rv = 0; 5152 5153 for (w = 0; w < FEATURE_WORDS; w++) { 5154 uint32_t host_feat = 5155 x86_cpu_get_supported_feature_word(w, false); 5156 uint32_t requested_features = env->features[w]; 5157 env->features[w] &= host_feat; 5158 cpu->filtered_features[w] = requested_features & ~env->features[w]; 5159 if (cpu->filtered_features[w]) { 5160 rv = 1; 5161 } 5162 } 5163 5164 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 5165 kvm_enabled()) { 5166 KVMState *s = CPU(cpu)->kvm_state; 5167 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 5168 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 5169 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 5170 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 5171 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 5172 5173 if (!eax_0 || 5174 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 5175 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 5176 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 5177 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 5178 INTEL_PT_ADDR_RANGES_NUM) || 5179 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 5180 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 5181 (ecx_0 & INTEL_PT_IP_LIP)) { 5182 /* 5183 * Processor Trace capabilities aren't configurable, so if the 5184 * host can't emulate the capabilities we report on 5185 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 
5186 */ 5187 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT; 5188 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT; 5189 rv = 1; 5190 } 5191 } 5192 5193 return rv; 5194 } 5195 5196 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ 5197 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ 5198 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) 5199 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ 5200 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ 5201 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) 5202 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 5203 { 5204 CPUState *cs = CPU(dev); 5205 X86CPU *cpu = X86_CPU(dev); 5206 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 5207 CPUX86State *env = &cpu->env; 5208 Error *local_err = NULL; 5209 static bool ht_warned; 5210 5211 if (xcc->host_cpuid_required) { 5212 if (!accel_uses_host_cpuid()) { 5213 char *name = x86_cpu_class_get_model_name(xcc); 5214 error_setg(&local_err, "CPU model '%s' requires KVM", name); 5215 g_free(name); 5216 goto out; 5217 } 5218 5219 if (enable_cpu_pm) { 5220 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 5221 &cpu->mwait.ecx, &cpu->mwait.edx); 5222 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 5223 } 5224 } 5225 5226 /* mwait extended info: needed for Core compatibility */ 5227 /* We always wake on interrupt even if host does not have the capability */ 5228 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 5229 5230 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 5231 error_setg(errp, "apic-id property was not initialized properly"); 5232 return; 5233 } 5234 5235 x86_cpu_expand_features(cpu, &local_err); 5236 if (local_err) { 5237 goto out; 5238 } 5239 5240 if (x86_cpu_filter_features(cpu) && 5241 (cpu->check_cpuid || cpu->enforce_cpuid)) { 5242 x86_cpu_report_filtered_features(cpu); 5243 if (cpu->enforce_cpuid) { 5244 error_setg(&local_err, 5245 accel_uses_host_cpuid() ? 
5246 "Host doesn't support requested features" : 5247 "TCG doesn't support requested features"); 5248 goto out; 5249 } 5250 } 5251 5252 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 5253 * CPUID[1].EDX. 5254 */ 5255 if (IS_AMD_CPU(env)) { 5256 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 5257 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 5258 & CPUID_EXT2_AMD_ALIASES); 5259 } 5260 5261 /* For 64bit systems think about the number of physical bits to present. 5262 * ideally this should be the same as the host; anything other than matching 5263 * the host can cause incorrect guest behaviour. 5264 * QEMU used to pick the magic value of 40 bits that corresponds to 5265 * consumer AMD devices but nothing else. 5266 */ 5267 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5268 if (accel_uses_host_cpuid()) { 5269 uint32_t host_phys_bits = x86_host_phys_bits(); 5270 static bool warned; 5271 5272 if (cpu->host_phys_bits) { 5273 /* The user asked for us to use the host physical bits */ 5274 cpu->phys_bits = host_phys_bits; 5275 if (cpu->host_phys_bits_limit && 5276 cpu->phys_bits > cpu->host_phys_bits_limit) { 5277 cpu->phys_bits = cpu->host_phys_bits_limit; 5278 } 5279 } 5280 5281 /* Print a warning if the user set it to a value that's not the 5282 * host value. 
5283 */ 5284 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 5285 !warned) { 5286 warn_report("Host physical bits (%u)" 5287 " does not match phys-bits property (%u)", 5288 host_phys_bits, cpu->phys_bits); 5289 warned = true; 5290 } 5291 5292 if (cpu->phys_bits && 5293 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 5294 cpu->phys_bits < 32)) { 5295 error_setg(errp, "phys-bits should be between 32 and %u " 5296 " (but is %u)", 5297 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 5298 return; 5299 } 5300 } else { 5301 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 5302 error_setg(errp, "TCG only supports phys-bits=%u", 5303 TCG_PHYS_ADDR_BITS); 5304 return; 5305 } 5306 } 5307 /* 0 means it was not explicitly set by the user (or by machine 5308 * compat_props or by the host code above). In this case, the default 5309 * is the value used by TCG (40). 5310 */ 5311 if (cpu->phys_bits == 0) { 5312 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 5313 } 5314 } else { 5315 /* For 32 bit systems don't use the user set value, but keep 5316 * phys_bits consistent with what we tell the guest. 
5317 */ 5318 if (cpu->phys_bits != 0) { 5319 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 5320 return; 5321 } 5322 5323 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 5324 cpu->phys_bits = 36; 5325 } else { 5326 cpu->phys_bits = 32; 5327 } 5328 } 5329 5330 /* Cache information initialization */ 5331 if (!cpu->legacy_cache) { 5332 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) { 5333 char *name = x86_cpu_class_get_model_name(xcc); 5334 error_setg(errp, 5335 "CPU model '%s' doesn't support legacy-cache=off", name); 5336 g_free(name); 5337 return; 5338 } 5339 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = 5340 *xcc->cpu_def->cache_info; 5341 } else { 5342 /* Build legacy cache information */ 5343 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; 5344 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; 5345 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; 5346 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; 5347 5348 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; 5349 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; 5350 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; 5351 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; 5352 5353 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; 5354 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; 5355 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; 5356 env->cache_info_amd.l3_cache = &legacy_l3_cache; 5357 } 5358 5359 5360 cpu_exec_realizefn(cs, &local_err); 5361 if (local_err != NULL) { 5362 error_propagate(errp, local_err); 5363 return; 5364 } 5365 5366 #ifndef CONFIG_USER_ONLY 5367 qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 5368 5369 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) { 5370 x86_cpu_apic_create(cpu, &local_err); 5371 if (local_err != NULL) { 5372 goto out; 5373 } 5374 } 5375 #endif 5376 5377 mce_init(cpu); 5378 5379 #ifndef CONFIG_USER_ONLY 5380 if (tcg_enabled()) { 5381 cpu->cpu_as_mem = 
g_new(MemoryRegion, 1); 5382 cpu->cpu_as_root = g_new(MemoryRegion, 1); 5383 5384 /* Outer container... */ 5385 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 5386 memory_region_set_enabled(cpu->cpu_as_root, true); 5387 5388 /* ... with two regions inside: normal system memory with low 5389 * priority, and... 5390 */ 5391 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 5392 get_system_memory(), 0, ~0ull); 5393 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 5394 memory_region_set_enabled(cpu->cpu_as_mem, true); 5395 5396 cs->num_ases = 2; 5397 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); 5398 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); 5399 5400 /* ... SMRAM with higher priority, linked from /machine/smram. */ 5401 cpu->machine_done.notify = x86_cpu_machine_done; 5402 qemu_add_machine_init_done_notifier(&cpu->machine_done); 5403 } 5404 #endif 5405 5406 qemu_init_vcpu(cs); 5407 5408 /* 5409 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU 5410 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX 5411 * based on inputs (sockets,cores,threads), it is still better to give 5412 * users a warning. 5413 * 5414 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 5415 * cs->nr_threads hasn't be populated yet and the checking is incorrect. 
5416 */ 5417 if (IS_AMD_CPU(env) && 5418 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && 5419 cs->nr_threads > 1 && !ht_warned) { 5420 warn_report("This family of AMD CPU doesn't support " 5421 "hyperthreading(%d)", 5422 cs->nr_threads); 5423 error_printf("Please configure -smp options properly" 5424 " or try enabling topoext feature.\n"); 5425 ht_warned = true; 5426 } 5427 5428 x86_cpu_apic_realize(cpu, &local_err); 5429 if (local_err != NULL) { 5430 goto out; 5431 } 5432 cpu_reset(cs); 5433 5434 xcc->parent_realize(dev, &local_err); 5435 5436 out: 5437 if (local_err != NULL) { 5438 error_propagate(errp, local_err); 5439 return; 5440 } 5441 } 5442 5443 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp) 5444 { 5445 X86CPU *cpu = X86_CPU(dev); 5446 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 5447 Error *local_err = NULL; 5448 5449 #ifndef CONFIG_USER_ONLY 5450 cpu_remove_sync(CPU(dev)); 5451 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 5452 #endif 5453 5454 if (cpu->apic_state) { 5455 object_unparent(OBJECT(cpu->apic_state)); 5456 cpu->apic_state = NULL; 5457 } 5458 5459 xcc->parent_unrealize(dev, &local_err); 5460 if (local_err != NULL) { 5461 error_propagate(errp, local_err); 5462 return; 5463 } 5464 } 5465 5466 typedef struct BitProperty { 5467 FeatureWord w; 5468 uint32_t mask; 5469 } BitProperty; 5470 5471 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 5472 void *opaque, Error **errp) 5473 { 5474 X86CPU *cpu = X86_CPU(obj); 5475 BitProperty *fp = opaque; 5476 uint32_t f = cpu->env.features[fp->w]; 5477 bool value = (f & fp->mask) == fp->mask; 5478 visit_type_bool(v, name, &value, errp); 5479 } 5480 5481 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 5482 void *opaque, Error **errp) 5483 { 5484 DeviceState *dev = DEVICE(obj); 5485 X86CPU *cpu = X86_CPU(obj); 5486 BitProperty *fp = opaque; 5487 Error *local_err = NULL; 5488 bool value; 5489 5490 if (dev->realized) { 5491 
qdev_prop_set_after_realize(dev, name, errp); 5492 return; 5493 } 5494 5495 visit_type_bool(v, name, &value, &local_err); 5496 if (local_err) { 5497 error_propagate(errp, local_err); 5498 return; 5499 } 5500 5501 if (value) { 5502 cpu->env.features[fp->w] |= fp->mask; 5503 } else { 5504 cpu->env.features[fp->w] &= ~fp->mask; 5505 } 5506 cpu->env.user_features[fp->w] |= fp->mask; 5507 } 5508 5509 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 5510 void *opaque) 5511 { 5512 BitProperty *prop = opaque; 5513 g_free(prop); 5514 } 5515 5516 /* Register a boolean property to get/set a single bit in a uint32_t field. 5517 * 5518 * The same property name can be registered multiple times to make it affect 5519 * multiple bits in the same FeatureWord. In that case, the getter will return 5520 * true only if all bits are set. 5521 */ 5522 static void x86_cpu_register_bit_prop(X86CPU *cpu, 5523 const char *prop_name, 5524 FeatureWord w, 5525 int bitnr) 5526 { 5527 BitProperty *fp; 5528 ObjectProperty *op; 5529 uint32_t mask = (1UL << bitnr); 5530 5531 op = object_property_find(OBJECT(cpu), prop_name, NULL); 5532 if (op) { 5533 fp = op->opaque; 5534 assert(fp->w == w); 5535 fp->mask |= mask; 5536 } else { 5537 fp = g_new0(BitProperty, 1); 5538 fp->w = w; 5539 fp->mask = mask; 5540 object_property_add(OBJECT(cpu), prop_name, "bool", 5541 x86_cpu_get_bit_prop, 5542 x86_cpu_set_bit_prop, 5543 x86_cpu_release_bit_prop, fp, &error_abort); 5544 } 5545 } 5546 5547 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 5548 FeatureWord w, 5549 int bitnr) 5550 { 5551 FeatureWordInfo *fi = &feature_word_info[w]; 5552 const char *name = fi->feat_names[bitnr]; 5553 5554 if (!name) { 5555 return; 5556 } 5557 5558 /* Property names should use "-" instead of "_". 
5559 * Old names containing underscores are registered as aliases 5560 * using object_property_add_alias() 5561 */ 5562 assert(!strchr(name, '_')); 5563 /* aliases don't use "|" delimiters anymore, they are registered 5564 * manually using object_property_add_alias() */ 5565 assert(!strchr(name, '|')); 5566 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 5567 } 5568 5569 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 5570 { 5571 X86CPU *cpu = X86_CPU(cs); 5572 CPUX86State *env = &cpu->env; 5573 GuestPanicInformation *panic_info = NULL; 5574 5575 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 5576 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 5577 5578 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 5579 5580 assert(HV_CRASH_PARAMS >= 5); 5581 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 5582 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 5583 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 5584 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 5585 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 5586 } 5587 5588 return panic_info; 5589 } 5590 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 5591 const char *name, void *opaque, 5592 Error **errp) 5593 { 5594 CPUState *cs = CPU(obj); 5595 GuestPanicInformation *panic_info; 5596 5597 if (!cs->crash_occurred) { 5598 error_setg(errp, "No crash occured"); 5599 return; 5600 } 5601 5602 panic_info = x86_cpu_get_crash_info(cs); 5603 if (panic_info == NULL) { 5604 error_setg(errp, "No crash information"); 5605 return; 5606 } 5607 5608 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 5609 errp); 5610 qapi_free_GuestPanicInformation(panic_info); 5611 } 5612 5613 static void x86_cpu_initfn(Object *obj) 5614 { 5615 X86CPU *cpu = X86_CPU(obj); 5616 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5617 CPUX86State *env = &cpu->env; 5618 FeatureWord w; 5619 5620 
cpu_set_cpustate_pointers(cpu); 5621 5622 object_property_add(obj, "family", "int", 5623 x86_cpuid_version_get_family, 5624 x86_cpuid_version_set_family, NULL, NULL, NULL); 5625 object_property_add(obj, "model", "int", 5626 x86_cpuid_version_get_model, 5627 x86_cpuid_version_set_model, NULL, NULL, NULL); 5628 object_property_add(obj, "stepping", "int", 5629 x86_cpuid_version_get_stepping, 5630 x86_cpuid_version_set_stepping, NULL, NULL, NULL); 5631 object_property_add_str(obj, "vendor", 5632 x86_cpuid_get_vendor, 5633 x86_cpuid_set_vendor, NULL); 5634 object_property_add_str(obj, "model-id", 5635 x86_cpuid_get_model_id, 5636 x86_cpuid_set_model_id, NULL); 5637 object_property_add(obj, "tsc-frequency", "int", 5638 x86_cpuid_get_tsc_freq, 5639 x86_cpuid_set_tsc_freq, NULL, NULL, NULL); 5640 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 5641 x86_cpu_get_feature_words, 5642 NULL, NULL, (void *)env->features, NULL); 5643 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 5644 x86_cpu_get_feature_words, 5645 NULL, NULL, (void *)cpu->filtered_features, NULL); 5646 /* 5647 * The "unavailable-features" property has the same semantics as 5648 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 5649 * QMP command: they list the features that would have prevented the 5650 * CPU from running if the "enforce" flag was set. 
5651 */ 5652 object_property_add(obj, "unavailable-features", "strList", 5653 x86_cpu_get_unavailable_features, 5654 NULL, NULL, NULL, &error_abort); 5655 5656 object_property_add(obj, "crash-information", "GuestPanicInformation", 5657 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL); 5658 5659 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; 5660 5661 for (w = 0; w < FEATURE_WORDS; w++) { 5662 int bitnr; 5663 5664 for (bitnr = 0; bitnr < 32; bitnr++) { 5665 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 5666 } 5667 } 5668 5669 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort); 5670 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort); 5671 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort); 5672 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort); 5673 object_property_add_alias(obj, "xd", obj, "nx", &error_abort); 5674 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort); 5675 object_property_add_alias(obj, "i64", obj, "lm", &error_abort); 5676 5677 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort); 5678 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort); 5679 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort); 5680 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort); 5681 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort); 5682 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort); 5683 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort); 5684 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort); 5685 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort); 5686 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort); 5687 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort); 5688 
object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort); 5689 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); 5690 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); 5691 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); 5692 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); 5693 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); 5694 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort); 5695 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort); 5696 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort); 5697 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort); 5698 5699 if (xcc->cpu_def) { 5700 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); 5701 } 5702 } 5703 5704 static int64_t x86_cpu_get_arch_id(CPUState *cs) 5705 { 5706 X86CPU *cpu = X86_CPU(cs); 5707 5708 return cpu->apic_id; 5709 } 5710 5711 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 5712 { 5713 X86CPU *cpu = X86_CPU(cs); 5714 5715 return cpu->env.cr[0] & CR0_PG_MASK; 5716 } 5717 5718 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 5719 { 5720 X86CPU *cpu = X86_CPU(cs); 5721 5722 cpu->env.eip = value; 5723 } 5724 5725 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 5726 { 5727 X86CPU *cpu = X86_CPU(cs); 5728 5729 cpu->env.eip = tb->pc - tb->cs_base; 5730 } 5731 5732 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 5733 { 5734 X86CPU *cpu = X86_CPU(cs); 5735 CPUX86State *env = &cpu->env; 5736 5737 #if !defined(CONFIG_USER_ONLY) 5738 if (interrupt_request & CPU_INTERRUPT_POLL) { 5739 return CPU_INTERRUPT_POLL; 5740 } 5741 #endif 5742 if (interrupt_request & CPU_INTERRUPT_SIPI) { 5743 return CPU_INTERRUPT_SIPI; 5744 } 5745 5746 if (env->hflags2 & HF2_GIF_MASK) { 5747 if 
((interrupt_request & CPU_INTERRUPT_SMI) && 5748 !(env->hflags & HF_SMM_MASK)) { 5749 return CPU_INTERRUPT_SMI; 5750 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 5751 !(env->hflags2 & HF2_NMI_MASK)) { 5752 return CPU_INTERRUPT_NMI; 5753 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 5754 return CPU_INTERRUPT_MCE; 5755 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 5756 (((env->hflags2 & HF2_VINTR_MASK) && 5757 (env->hflags2 & HF2_HIF_MASK)) || 5758 (!(env->hflags2 & HF2_VINTR_MASK) && 5759 (env->eflags & IF_MASK && 5760 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 5761 return CPU_INTERRUPT_HARD; 5762 #if !defined(CONFIG_USER_ONLY) 5763 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 5764 (env->eflags & IF_MASK) && 5765 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 5766 return CPU_INTERRUPT_VIRQ; 5767 #endif 5768 } 5769 } 5770 5771 return 0; 5772 } 5773 5774 static bool x86_cpu_has_work(CPUState *cs) 5775 { 5776 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 5777 } 5778 5779 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 5780 { 5781 X86CPU *cpu = X86_CPU(cs); 5782 CPUX86State *env = &cpu->env; 5783 5784 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 5785 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 5786 : bfd_mach_i386_i8086); 5787 info->print_insn = print_insn_i386; 5788 5789 info->cap_arch = CS_ARCH_X86; 5790 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 5791 : env->hflags & HF_CS32_MASK ? 
CS_MODE_32 5792 : CS_MODE_16); 5793 info->cap_insn_unit = 1; 5794 info->cap_insn_split = 8; 5795 } 5796 5797 void x86_update_hflags(CPUX86State *env) 5798 { 5799 uint32_t hflags; 5800 #define HFLAG_COPY_MASK \ 5801 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 5802 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 5803 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 5804 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 5805 5806 hflags = env->hflags & HFLAG_COPY_MASK; 5807 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 5808 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 5809 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 5810 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 5811 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 5812 5813 if (env->cr[4] & CR4_OSFXSR_MASK) { 5814 hflags |= HF_OSFXSR_MASK; 5815 } 5816 5817 if (env->efer & MSR_EFER_LMA) { 5818 hflags |= HF_LMA_MASK; 5819 } 5820 5821 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 5822 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 5823 } else { 5824 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 5825 (DESC_B_SHIFT - HF_CS32_SHIFT); 5826 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 5827 (DESC_B_SHIFT - HF_SS32_SHIFT); 5828 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 5829 !(hflags & HF_CS32_MASK)) { 5830 hflags |= HF_ADDSEG_MASK; 5831 } else { 5832 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 5833 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 5834 } 5835 } 5836 env->hflags = hflags; 5837 } 5838 5839 static Property x86_cpu_properties[] = { 5840 #ifdef CONFIG_USER_ONLY 5841 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 5842 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 5843 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 5844 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 5845 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 
#else
    /* System emulation: topology IDs start unassigned and are set by the board */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments, all disabled by default */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level limits; UINT32_MAX means "pick automatically" */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

/*
 * Class init for the abstract TYPE_X86_CPU type: wires up the generic
 * CPUClass/DeviceClass hooks shared by all x86 CPU models.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Chain the reset handler, keeping the parent's for later use */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

/* Abstract base QOM type for all x86 CPU models */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};


/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* NOTE(review): ordering appears to control listing order in help
     * output — confirm against x86_cpu_list_entry(). */
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};

/*
 * Register the abstract x86 CPU type, one concrete type per built-in
 * CPU model definition, plus the special "max", "base" and (with KVM or
 * HVF) "host" models.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)