1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/bitops.h" 23 24 #include "cpu.h" 25 #include "exec/exec-all.h" 26 #include "sysemu/kvm.h" 27 #include "sysemu/hvf.h" 28 #include "sysemu/cpus.h" 29 #include "kvm_i386.h" 30 #include "sev_i386.h" 31 32 #include "qemu/error-report.h" 33 #include "qemu/option.h" 34 #include "qemu/config-file.h" 35 #include "qapi/error.h" 36 #include "qapi/qapi-visit-misc.h" 37 #include "qapi/qapi-visit-run-state.h" 38 #include "qapi/qmp/qdict.h" 39 #include "qapi/qmp/qerror.h" 40 #include "qapi/visitor.h" 41 #include "qom/qom-qobject.h" 42 #include "sysemu/arch_init.h" 43 44 #include "standard-headers/asm-x86/kvm_para.h" 45 46 #include "sysemu/sysemu.h" 47 #include "hw/qdev-properties.h" 48 #include "hw/i386/topology.h" 49 #ifndef CONFIG_USER_ONLY 50 #include "exec/address-spaces.h" 51 #include "hw/hw.h" 52 #include "hw/xen/xen.h" 53 #include "hw/i386/apic_internal.h" 54 #endif 55 56 #include "disas/capstone.h" 57 58 /* Helpers for building CPUID[2] descriptors: */ 59 60 struct CPUID2CacheDescriptorInfo { 61 enum CacheType type; 62 int level; 63 int size; 64 int line_size; 65 int associativity; 66 }; 67 68 #define KiB 1024 69 #define MiB (1024 * 
1024) 70 71 /* 72 * Known CPUID 2 cache descriptors. 73 * From Intel SDM Volume 2A, CPUID instruction 74 */ 75 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 76 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB, 77 .associativity = 4, .line_size = 32, }, 78 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB, 79 .associativity = 4, .line_size = 32, }, 80 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB, 81 .associativity = 4, .line_size = 64, }, 82 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB, 83 .associativity = 2, .line_size = 32, }, 84 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 85 .associativity = 4, .line_size = 32, }, 86 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 87 .associativity = 4, .line_size = 64, }, 88 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB, 89 .associativity = 6, .line_size = 64, }, 90 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 91 .associativity = 2, .line_size = 64, }, 92 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 93 .associativity = 8, .line_size = 64, }, 94 /* lines per sector is not supported cpuid2_cache_descriptor(), 95 * so descriptors 0x22, 0x23 are not included 96 */ 97 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 98 .associativity = 16, .line_size = 64, }, 99 /* lines per sector is not supported cpuid2_cache_descriptor(), 100 * so descriptors 0x25, 0x20 are not included 101 */ 102 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB, 103 .associativity = 8, .line_size = 64, }, 104 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB, 105 .associativity = 8, .line_size = 64, }, 106 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 107 .associativity = 4, .line_size = 32, }, 108 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 109 .associativity = 4, .line_size = 32, }, 110 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 111 .associativity = 4, 
.line_size = 32, }, 112 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 113 .associativity = 4, .line_size = 32, }, 114 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 115 .associativity = 4, .line_size = 32, }, 116 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 117 .associativity = 4, .line_size = 64, }, 118 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 119 .associativity = 8, .line_size = 64, }, 120 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 121 .associativity = 12, .line_size = 64, }, 122 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 123 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 126 .associativity = 16, .line_size = 64, }, 127 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 128 .associativity = 12, .line_size = 64, }, 129 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 130 .associativity = 16, .line_size = 64, }, 131 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 132 .associativity = 24, .line_size = 64, }, 133 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 134 .associativity = 8, .line_size = 64, }, 135 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB, 136 .associativity = 4, .line_size = 64, }, 137 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB, 138 .associativity = 4, .line_size = 64, }, 139 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB, 140 .associativity = 4, .line_size = 64, }, 141 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 142 .associativity = 4, .line_size = 64, }, 143 /* lines per sector is not supported cpuid2_cache_descriptor(), 144 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 
145 */ 146 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 147 .associativity = 8, .line_size = 64, }, 148 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 149 .associativity = 2, .line_size = 64, }, 150 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 151 .associativity = 8, .line_size = 64, }, 152 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 153 .associativity = 8, .line_size = 32, }, 154 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 155 .associativity = 8, .line_size = 32, }, 156 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 157 .associativity = 8, .line_size = 32, }, 158 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 159 .associativity = 8, .line_size = 32, }, 160 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 161 .associativity = 4, .line_size = 64, }, 162 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 163 .associativity = 8, .line_size = 64, }, 164 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 165 .associativity = 4, .line_size = 64, }, 166 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 167 .associativity = 4, .line_size = 64, }, 168 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 169 .associativity = 4, .line_size = 64, }, 170 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 171 .associativity = 8, .line_size = 64, }, 172 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 173 .associativity = 8, .line_size = 64, }, 174 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 175 .associativity = 8, .line_size = 64, }, 176 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 177 .associativity = 12, .line_size = 64, }, 178 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 179 .associativity = 12, .line_size = 64, }, 180 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 181 .associativity = 
12, .line_size = 64, }, 182 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 183 .associativity = 16, .line_size = 64, }, 184 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 185 .associativity = 16, .line_size = 64, }, 186 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 187 .associativity = 16, .line_size = 64, }, 188 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 189 .associativity = 24, .line_size = 64, }, 190 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 191 .associativity = 24, .line_size = 64, }, 192 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 193 .associativity = 24, .line_size = 64, }, 194 }; 195 196 /* 197 * "CPUID leaf 2 does not report cache descriptor information, 198 * use CPUID leaf 4 to query cache parameters" 199 */ 200 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 201 202 /* 203 * Return a CPUID 2 cache descriptor for a given cache. 204 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 205 */ 206 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 207 { 208 int i; 209 210 assert(cache->size > 0); 211 assert(cache->level > 0); 212 assert(cache->line_size > 0); 213 assert(cache->associativity > 0); 214 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 215 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 216 if (d->level == cache->level && d->type == cache->type && 217 d->size == cache->size && d->line_size == cache->line_size && 218 d->associativity == cache->associativity) { 219 return i; 220 } 221 } 222 223 return CACHE_DESCRIPTOR_UNAVAILABLE; 224 } 225 226 /* CPUID Leaf 4 constants: */ 227 228 /* EAX: */ 229 #define CACHE_TYPE_D 1 230 #define CACHE_TYPE_I 2 231 #define CACHE_TYPE_UNIFIED 3 232 233 #define CACHE_LEVEL(l) (l << 5) 234 235 #define CACHE_SELF_INIT_LEVEL (1 << 8) 236 237 /* EDX: */ 238 #define CACHE_NO_INVD_SHARING (1 << 0) 239 #define CACHE_INCLUSIVE (1 << 1) 240 #define 
CACHE_COMPLEX_IDX (1 << 2) 241 242 /* Encode CacheType for CPUID[4].EAX */ 243 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \ 244 ((t) == ICACHE) ? CACHE_TYPE_I : \ 245 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 246 0 /* Invalid value */) 247 248 249 /* Encode cache info for CPUID[4] */ 250 static void encode_cache_cpuid4(CPUCacheInfo *cache, 251 int num_apic_ids, int num_cores, 252 uint32_t *eax, uint32_t *ebx, 253 uint32_t *ecx, uint32_t *edx) 254 { 255 assert(cache->size == cache->line_size * cache->associativity * 256 cache->partitions * cache->sets); 257 258 assert(num_apic_ids > 0); 259 *eax = CACHE_TYPE(cache->type) | 260 CACHE_LEVEL(cache->level) | 261 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 262 ((num_cores - 1) << 26) | 263 ((num_apic_ids - 1) << 14); 264 265 assert(cache->line_size > 0); 266 assert(cache->partitions > 0); 267 assert(cache->associativity > 0); 268 /* We don't implement fully-associative caches */ 269 assert(cache->associativity < cache->sets); 270 *ebx = (cache->line_size - 1) | 271 ((cache->partitions - 1) << 12) | 272 ((cache->associativity - 1) << 22); 273 274 assert(cache->sets > 0); 275 *ecx = cache->sets - 1; 276 277 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 278 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 279 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 280 } 281 282 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 283 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 284 { 285 assert(cache->size % 1024 == 0); 286 assert(cache->lines_per_tag > 0); 287 assert(cache->associativity > 0); 288 assert(cache->line_size > 0); 289 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 290 (cache->lines_per_tag << 8) | (cache->line_size); 291 } 292 293 #define ASSOC_FULL 0xFF 294 295 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 296 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 297 a == 2 ? 0x2 : \ 298 a == 4 ? 
0x4 : \ 299 a == 8 ? 0x6 : \ 300 a == 16 ? 0x8 : \ 301 a == 32 ? 0xA : \ 302 a == 48 ? 0xB : \ 303 a == 64 ? 0xC : \ 304 a == 96 ? 0xD : \ 305 a == 128 ? 0xE : \ 306 a == ASSOC_FULL ? 0xF : \ 307 0 /* invalid value */) 308 309 /* 310 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 311 * @l3 can be NULL. 312 */ 313 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 314 CPUCacheInfo *l3, 315 uint32_t *ecx, uint32_t *edx) 316 { 317 assert(l2->size % 1024 == 0); 318 assert(l2->associativity > 0); 319 assert(l2->lines_per_tag > 0); 320 assert(l2->line_size > 0); 321 *ecx = ((l2->size / 1024) << 16) | 322 (AMD_ENC_ASSOC(l2->associativity) << 12) | 323 (l2->lines_per_tag << 8) | (l2->line_size); 324 325 if (l3) { 326 assert(l3->size % (512 * 1024) == 0); 327 assert(l3->associativity > 0); 328 assert(l3->lines_per_tag > 0); 329 assert(l3->line_size > 0); 330 *edx = ((l3->size / (512 * 1024)) << 18) | 331 (AMD_ENC_ASSOC(l3->associativity) << 12) | 332 (l3->lines_per_tag << 8) | (l3->line_size); 333 } else { 334 *edx = 0; 335 } 336 } 337 338 /* 339 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 340 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 341 * Define the constants to build the cpu topology. Right now, TOPOEXT 342 * feature is enabled only on EPYC. So, these constants are based on 343 * EPYC supported configurations. We may need to handle the cases if 344 * these values change in future. 345 */ 346 /* Maximum core complexes in a node */ 347 #define MAX_CCX 2 348 /* Maximum cores in a core complex */ 349 #define MAX_CORES_IN_CCX 4 350 /* Maximum cores in a node */ 351 #define MAX_CORES_IN_NODE 8 352 /* Maximum nodes in a socket */ 353 #define MAX_NODES_PER_SOCKET 4 354 355 /* 356 * Figure out the number of nodes required to build this config. 
/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
{
    int nodes;

    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);

   /* Hardware does not support config with 3 nodes, return 4 in that case */
    return (nodes == 3) ? 4 : nodes;
}

/*
 * Decide the number of cores in a core complex with the given nr_cores using
 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
 * L3 cache is shared across all cores in a core complex. So, this will also
 * tell us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
{
    int nodes;

    /* Check if we can fit all the cores in one core complex */
    if (nr_cores <= MAX_CORES_IN_CCX) {
        return nr_cores;
    }
    /* Get the number of nodes required to build this config */
    nodes = nodes_in_socket(nr_cores);

    /*
     * Divide the cores across all the core complexes
     * Return rounded up value
     */
    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
}

/* Encode cache info for CPUID[8000001D].
 * Same register layout as CPUID[4], except that EAX[25:14] holds the
 * number of logical processors (minus one) sharing this cache level.
 */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        /* L1/L2 are shared only by the SMT siblings of one core */
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
/*
 * Build the configuration closely match the EPYC hardware. Using the EPYC
 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
 * right now. This could change in future.
 * nr_cores : Total number of cores in the config
 * core_id  : Core index of the current CPU
 * topo     : Data structure to hold all the config info for this core index
 */
static void build_core_topology(int nr_cores, int core_id,
                                struct core_topology *topo)
{
    int nodes, cores_in_ccx;

    /* First get the number of nodes required */
    nodes = nodes_in_socket(nr_cores);

    cores_in_ccx = cores_in_core_complex(nr_cores);

    /* Decompose the flat core index into (node, ccx, core-within-ccx) */
    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
    topo->core_id = core_id % cores_in_ccx;
    topo->num_nodes = nodes;
}

/* Encode cache info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big number at the same time. This is not
         * an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DCACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DCACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = ICACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = ICACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
Single-Range Output scheme; 686 */ 687 #define INTEL_PT_MINIMAL_ECX 0x7 688 /* generated packets which contain IP payloads have LIP values */ 689 #define INTEL_PT_IP_LIP (1 << 31) 690 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 691 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 692 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 693 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 694 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 695 696 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 697 uint32_t vendor2, uint32_t vendor3) 698 { 699 int i; 700 for (i = 0; i < 4; i++) { 701 dst[i] = vendor1 >> (8 * i); 702 dst[i + 4] = vendor2 >> (8 * i); 703 dst[i + 8] = vendor3 >> (8 * i); 704 } 705 dst[CPUID_VENDOR_SZ] = '\0'; 706 } 707 708 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 709 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 710 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 711 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 712 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 713 CPUID_PSE36 | CPUID_FXSR) 714 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 715 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 716 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 717 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 718 CPUID_PAE | CPUID_SEP | CPUID_APIC) 719 720 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 721 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 722 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 723 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 724 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 725 /* partly implemented: 726 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 727 /* missing: 728 CPUID_VME, 
CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 729 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 730 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 731 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 732 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 733 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR) 734 /* missing: 735 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 736 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 737 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 738 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 739 CPUID_EXT_F16C, CPUID_EXT_RDRAND */ 740 741 #ifdef TARGET_X86_64 742 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 743 #else 744 #define TCG_EXT2_X86_64_FEATURES 0 745 #endif 746 747 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 748 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 749 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 750 TCG_EXT2_X86_64_FEATURES) 751 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 752 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 753 #define TCG_EXT4_FEATURES 0 754 #define TCG_SVM_FEATURES 0 755 #define TCG_KVM_FEATURES 0 756 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 757 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 758 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 759 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 760 CPUID_7_0_EBX_ERMS) 761 /* missing: 762 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 763 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 764 CPUID_7_0_EBX_RDSEED */ 765 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 766 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 767 CPUID_7_0_ECX_LA57) 768 #define TCG_7_0_EDX_FEATURES 0 769 #define TCG_APM_FEATURES 0 770 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 771 #define 
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;

/* Per-feature-word CPUID metadata; indexed by FeatureWord, one bit name
 * per feat_names slot (NULL = reserved / not exposed by name). */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): "kvmclock" appears at bits 0 and 3 — presumably
             * the old and new kvmclock MSR interfaces; confirm against
             * the KVM CPUID feature bit definitions. */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean",  "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote",
NULL, NULL, 989 NULL, NULL, NULL, NULL, 990 }, 991 .cpuid_eax = 7, 992 .cpuid_needs_ecx = true, .cpuid_ecx = 0, 993 .cpuid_reg = R_ECX, 994 .tcg_features = TCG_7_0_ECX_FEATURES, 995 }, 996 [FEAT_7_0_EDX] = { 997 .feat_names = { 998 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 999 NULL, NULL, NULL, NULL, 1000 NULL, NULL, NULL, NULL, 1001 NULL, NULL, NULL, NULL, 1002 NULL, NULL, NULL, NULL, 1003 NULL, NULL, NULL, NULL, 1004 NULL, NULL, "spec-ctrl", NULL, 1005 NULL, NULL, NULL, "ssbd", 1006 }, 1007 .cpuid_eax = 7, 1008 .cpuid_needs_ecx = true, .cpuid_ecx = 0, 1009 .cpuid_reg = R_EDX, 1010 .tcg_features = TCG_7_0_EDX_FEATURES, 1011 }, 1012 [FEAT_8000_0007_EDX] = { 1013 .feat_names = { 1014 NULL, NULL, NULL, NULL, 1015 NULL, NULL, NULL, NULL, 1016 "invtsc", NULL, NULL, NULL, 1017 NULL, NULL, NULL, NULL, 1018 NULL, NULL, NULL, NULL, 1019 NULL, NULL, NULL, NULL, 1020 NULL, NULL, NULL, NULL, 1021 NULL, NULL, NULL, NULL, 1022 }, 1023 .cpuid_eax = 0x80000007, 1024 .cpuid_reg = R_EDX, 1025 .tcg_features = TCG_APM_FEATURES, 1026 .unmigratable_flags = CPUID_APM_INVTSC, 1027 }, 1028 [FEAT_8000_0008_EBX] = { 1029 .feat_names = { 1030 NULL, NULL, NULL, NULL, 1031 NULL, NULL, NULL, NULL, 1032 NULL, NULL, NULL, NULL, 1033 "ibpb", NULL, NULL, NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, NULL, NULL, NULL, 1036 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1037 NULL, NULL, NULL, NULL, 1038 }, 1039 .cpuid_eax = 0x80000008, 1040 .cpuid_reg = R_EBX, 1041 .tcg_features = 0, 1042 .unmigratable_flags = 0, 1043 }, 1044 [FEAT_XSAVE] = { 1045 .feat_names = { 1046 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1047 NULL, NULL, NULL, NULL, 1048 NULL, NULL, NULL, NULL, 1049 NULL, NULL, NULL, NULL, 1050 NULL, NULL, NULL, NULL, 1051 NULL, NULL, NULL, NULL, 1052 NULL, NULL, NULL, NULL, 1053 NULL, NULL, NULL, NULL, 1054 }, 1055 .cpuid_eax = 0xd, 1056 .cpuid_needs_ecx = true, .cpuid_ecx = 1, 1057 .cpuid_reg = R_EAX, 1058 .tcg_features = TCG_XSAVE_FEATURES, 1059 }, 1060 [FEAT_6_EAX] = { 1061 .feat_names = 
{
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /*
     * CPUID[0xD].EAX (ECX=0): low 32 bits of the supported XSAVE
     * components mask.  Combined with FEAT_XSAVE_COMP_HI by
     * x86_cpu_xsave_components() into a single 64-bit mask.
     */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        /* XSAVE components QEMU knows how to migrate */
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    /* CPUID[0xD].EDX (ECX=0): high 32 bits of the same components mask */
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};

/* Mapping from an x86 32-bit register index (R_*) to its QAPI enum value */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Helper so each table entry below stays on one line */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/*
 * Descriptor for one XSAVE state component: the CPUID feature word/bits
 * that enable it, and its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* Per-component XSAVE descriptors, indexed by XSTATE_*_BIT */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if
           XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/*
 * Return the size in bytes of the XSAVE area needed to hold the state
 * components selected by @mask (bit i of @mask selects component i of
 * x86_ext_save_areas): the largest offset + size among the selected
 * components, or 0 if no known component is selected.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

/* True when the accelerator in use (KVM or HVF) exposes the host's CPUID */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

/* Combine FEAT_XSAVE_COMP_HI/LO into the 64-bit XSAVE components mask */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the name of 32-bit register @reg, or NULL if out of range */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

/*
 * Execute the CPUID instruction on the host for leaf @function and
 * subleaf @count, storing the results into whichever of @eax..@edx are
 * non-NULL.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa preserve all GPRs; results are stored through %2 (vec)
     * instead of output constraints — presumably so EBX, which may serve
     * as the PIC base register on i386, is never an asm output.
     * NOTE(review): rationale inferred, confirm against build flags. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

/*
 * Read the host CPU's vendor string (CPUID leaf 0) into @vendor and its
 * family/model/stepping (decoded from CPUID leaf 1 EAX) into whichever
 * of @family/@model/@stepping are non-NULL.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx,
               &edx);
    if (family) {
        /* base family (bits 11:8) plus extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (bits 7:4), extended model (bits 19:16) as high nibble */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM class for CPU model @cpu_model; NULL if not registered */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/* Inverse of x86_cpu_type_name(): strip the type suffix from the class
 * name.  Caller is responsible for freeing the returned string. */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* Static definition of one built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
};

/* Cache topology advertised by the EPYC CPU model */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DCACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = ICACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init =
1, 1336 .no_invd_sharing = true, 1337 }, 1338 .l2_cache = &(CPUCacheInfo) { 1339 .type = UNIFIED_CACHE, 1340 .level = 2, 1341 .size = 512 * KiB, 1342 .line_size = 64, 1343 .associativity = 8, 1344 .partitions = 1, 1345 .sets = 1024, 1346 .lines_per_tag = 1, 1347 }, 1348 .l3_cache = &(CPUCacheInfo) { 1349 .type = UNIFIED_CACHE, 1350 .level = 3, 1351 .size = 8 * MiB, 1352 .line_size = 64, 1353 .associativity = 16, 1354 .partitions = 1, 1355 .sets = 8192, 1356 .lines_per_tag = 1, 1357 .self_init = true, 1358 .inclusive = true, 1359 .complex_indexing = true, 1360 }, 1361 }; 1362 1363 static X86CPUDefinition builtin_x86_defs[] = { 1364 { 1365 .name = "qemu64", 1366 .level = 0xd, 1367 .vendor = CPUID_VENDOR_AMD, 1368 .family = 6, 1369 .model = 6, 1370 .stepping = 3, 1371 .features[FEAT_1_EDX] = 1372 PPRO_FEATURES | 1373 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1374 CPUID_PSE36, 1375 .features[FEAT_1_ECX] = 1376 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1377 .features[FEAT_8000_0001_EDX] = 1378 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1379 .features[FEAT_8000_0001_ECX] = 1380 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1381 .xlevel = 0x8000000A, 1382 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1383 }, 1384 { 1385 .name = "phenom", 1386 .level = 5, 1387 .vendor = CPUID_VENDOR_AMD, 1388 .family = 16, 1389 .model = 2, 1390 .stepping = 3, 1391 /* Missing: CPUID_HT */ 1392 .features[FEAT_1_EDX] = 1393 PPRO_FEATURES | 1394 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1395 CPUID_PSE36 | CPUID_VME, 1396 .features[FEAT_1_ECX] = 1397 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1398 CPUID_EXT_POPCNT, 1399 .features[FEAT_8000_0001_EDX] = 1400 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1401 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1402 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1403 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1404 CPUID_EXT3_CR8LEG, 1405 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1406 
CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1407 .features[FEAT_8000_0001_ECX] = 1408 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1409 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1410 /* Missing: CPUID_SVM_LBRV */ 1411 .features[FEAT_SVM] = 1412 CPUID_SVM_NPT, 1413 .xlevel = 0x8000001A, 1414 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1415 }, 1416 { 1417 .name = "core2duo", 1418 .level = 10, 1419 .vendor = CPUID_VENDOR_INTEL, 1420 .family = 6, 1421 .model = 15, 1422 .stepping = 11, 1423 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1424 .features[FEAT_1_EDX] = 1425 PPRO_FEATURES | 1426 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1427 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1428 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1429 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1430 .features[FEAT_1_ECX] = 1431 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1432 CPUID_EXT_CX16, 1433 .features[FEAT_8000_0001_EDX] = 1434 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1435 .features[FEAT_8000_0001_ECX] = 1436 CPUID_EXT3_LAHF_LM, 1437 .xlevel = 0x80000008, 1438 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1439 }, 1440 { 1441 .name = "kvm64", 1442 .level = 0xd, 1443 .vendor = CPUID_VENDOR_INTEL, 1444 .family = 15, 1445 .model = 6, 1446 .stepping = 1, 1447 /* Missing: CPUID_HT */ 1448 .features[FEAT_1_EDX] = 1449 PPRO_FEATURES | CPUID_VME | 1450 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1451 CPUID_PSE36, 1452 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1453 .features[FEAT_1_ECX] = 1454 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1455 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1456 .features[FEAT_8000_0001_EDX] = 1457 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1458 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1459 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1460 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1461 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM 
*/ 1462 .features[FEAT_8000_0001_ECX] = 1463 0, 1464 .xlevel = 0x80000008, 1465 .model_id = "Common KVM processor" 1466 }, 1467 { 1468 .name = "qemu32", 1469 .level = 4, 1470 .vendor = CPUID_VENDOR_INTEL, 1471 .family = 6, 1472 .model = 6, 1473 .stepping = 3, 1474 .features[FEAT_1_EDX] = 1475 PPRO_FEATURES, 1476 .features[FEAT_1_ECX] = 1477 CPUID_EXT_SSE3, 1478 .xlevel = 0x80000004, 1479 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1480 }, 1481 { 1482 .name = "kvm32", 1483 .level = 5, 1484 .vendor = CPUID_VENDOR_INTEL, 1485 .family = 15, 1486 .model = 6, 1487 .stepping = 1, 1488 .features[FEAT_1_EDX] = 1489 PPRO_FEATURES | CPUID_VME | 1490 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1491 .features[FEAT_1_ECX] = 1492 CPUID_EXT_SSE3, 1493 .features[FEAT_8000_0001_ECX] = 1494 0, 1495 .xlevel = 0x80000008, 1496 .model_id = "Common 32-bit KVM processor" 1497 }, 1498 { 1499 .name = "coreduo", 1500 .level = 10, 1501 .vendor = CPUID_VENDOR_INTEL, 1502 .family = 6, 1503 .model = 14, 1504 .stepping = 8, 1505 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1506 .features[FEAT_1_EDX] = 1507 PPRO_FEATURES | CPUID_VME | 1508 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1509 CPUID_SS, 1510 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1511 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1512 .features[FEAT_1_ECX] = 1513 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1514 .features[FEAT_8000_0001_EDX] = 1515 CPUID_EXT2_NX, 1516 .xlevel = 0x80000008, 1517 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1518 }, 1519 { 1520 .name = "486", 1521 .level = 1, 1522 .vendor = CPUID_VENDOR_INTEL, 1523 .family = 4, 1524 .model = 8, 1525 .stepping = 0, 1526 .features[FEAT_1_EDX] = 1527 I486_FEATURES, 1528 .xlevel = 0, 1529 .model_id = "", 1530 }, 1531 { 1532 .name = "pentium", 1533 .level = 1, 1534 .vendor = CPUID_VENDOR_INTEL, 1535 .family = 5, 1536 .model = 4, 1537 .stepping = 3, 1538 .features[FEAT_1_EDX] = 1539 PENTIUM_FEATURES, 1540 .xlevel = 0, 1541 
.model_id = "", 1542 }, 1543 { 1544 .name = "pentium2", 1545 .level = 2, 1546 .vendor = CPUID_VENDOR_INTEL, 1547 .family = 6, 1548 .model = 5, 1549 .stepping = 2, 1550 .features[FEAT_1_EDX] = 1551 PENTIUM2_FEATURES, 1552 .xlevel = 0, 1553 .model_id = "", 1554 }, 1555 { 1556 .name = "pentium3", 1557 .level = 3, 1558 .vendor = CPUID_VENDOR_INTEL, 1559 .family = 6, 1560 .model = 7, 1561 .stepping = 3, 1562 .features[FEAT_1_EDX] = 1563 PENTIUM3_FEATURES, 1564 .xlevel = 0, 1565 .model_id = "", 1566 }, 1567 { 1568 .name = "athlon", 1569 .level = 2, 1570 .vendor = CPUID_VENDOR_AMD, 1571 .family = 6, 1572 .model = 2, 1573 .stepping = 3, 1574 .features[FEAT_1_EDX] = 1575 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1576 CPUID_MCA, 1577 .features[FEAT_8000_0001_EDX] = 1578 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1579 .xlevel = 0x80000008, 1580 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1581 }, 1582 { 1583 .name = "n270", 1584 .level = 10, 1585 .vendor = CPUID_VENDOR_INTEL, 1586 .family = 6, 1587 .model = 28, 1588 .stepping = 2, 1589 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1590 .features[FEAT_1_EDX] = 1591 PPRO_FEATURES | 1592 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1593 CPUID_ACPI | CPUID_SS, 1594 /* Some CPUs got no CPUID_SEP */ 1595 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1596 * CPUID_EXT_XTPR */ 1597 .features[FEAT_1_ECX] = 1598 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1599 CPUID_EXT_MOVBE, 1600 .features[FEAT_8000_0001_EDX] = 1601 CPUID_EXT2_NX, 1602 .features[FEAT_8000_0001_ECX] = 1603 CPUID_EXT3_LAHF_LM, 1604 .xlevel = 0x80000008, 1605 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1606 }, 1607 { 1608 .name = "Conroe", 1609 .level = 10, 1610 .vendor = CPUID_VENDOR_INTEL, 1611 .family = 6, 1612 .model = 15, 1613 .stepping = 3, 1614 .features[FEAT_1_EDX] = 1615 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1616 CPUID_CLFLUSH | CPUID_PSE36 | 
CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1617 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1618 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1619 CPUID_DE | CPUID_FP87, 1620 .features[FEAT_1_ECX] = 1621 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1622 .features[FEAT_8000_0001_EDX] = 1623 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1624 .features[FEAT_8000_0001_ECX] = 1625 CPUID_EXT3_LAHF_LM, 1626 .xlevel = 0x80000008, 1627 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1628 }, 1629 { 1630 .name = "Penryn", 1631 .level = 10, 1632 .vendor = CPUID_VENDOR_INTEL, 1633 .family = 6, 1634 .model = 23, 1635 .stepping = 3, 1636 .features[FEAT_1_EDX] = 1637 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1638 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1639 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1640 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1641 CPUID_DE | CPUID_FP87, 1642 .features[FEAT_1_ECX] = 1643 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1644 CPUID_EXT_SSE3, 1645 .features[FEAT_8000_0001_EDX] = 1646 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1647 .features[FEAT_8000_0001_ECX] = 1648 CPUID_EXT3_LAHF_LM, 1649 .xlevel = 0x80000008, 1650 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1651 }, 1652 { 1653 .name = "Nehalem", 1654 .level = 11, 1655 .vendor = CPUID_VENDOR_INTEL, 1656 .family = 6, 1657 .model = 26, 1658 .stepping = 3, 1659 .features[FEAT_1_EDX] = 1660 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1661 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1662 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1663 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1664 CPUID_DE | CPUID_FP87, 1665 .features[FEAT_1_ECX] = 1666 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1667 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1668 
.features[FEAT_8000_0001_EDX] = 1669 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1670 .features[FEAT_8000_0001_ECX] = 1671 CPUID_EXT3_LAHF_LM, 1672 .xlevel = 0x80000008, 1673 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1674 }, 1675 { 1676 .name = "Nehalem-IBRS", 1677 .level = 11, 1678 .vendor = CPUID_VENDOR_INTEL, 1679 .family = 6, 1680 .model = 26, 1681 .stepping = 3, 1682 .features[FEAT_1_EDX] = 1683 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1684 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1685 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1686 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1687 CPUID_DE | CPUID_FP87, 1688 .features[FEAT_1_ECX] = 1689 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1690 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1691 .features[FEAT_7_0_EDX] = 1692 CPUID_7_0_EDX_SPEC_CTRL, 1693 .features[FEAT_8000_0001_EDX] = 1694 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1695 .features[FEAT_8000_0001_ECX] = 1696 CPUID_EXT3_LAHF_LM, 1697 .xlevel = 0x80000008, 1698 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)", 1699 }, 1700 { 1701 .name = "Westmere", 1702 .level = 11, 1703 .vendor = CPUID_VENDOR_INTEL, 1704 .family = 6, 1705 .model = 44, 1706 .stepping = 1, 1707 .features[FEAT_1_EDX] = 1708 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1709 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1710 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1711 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1712 CPUID_DE | CPUID_FP87, 1713 .features[FEAT_1_ECX] = 1714 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1715 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1716 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1717 .features[FEAT_8000_0001_EDX] = 1718 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1719 .features[FEAT_8000_0001_ECX] = 1720 CPUID_EXT3_LAHF_LM, 
1721 .features[FEAT_6_EAX] = 1722 CPUID_6_EAX_ARAT, 1723 .xlevel = 0x80000008, 1724 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1725 }, 1726 { 1727 .name = "Westmere-IBRS", 1728 .level = 11, 1729 .vendor = CPUID_VENDOR_INTEL, 1730 .family = 6, 1731 .model = 44, 1732 .stepping = 1, 1733 .features[FEAT_1_EDX] = 1734 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1735 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1736 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1737 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1738 CPUID_DE | CPUID_FP87, 1739 .features[FEAT_1_ECX] = 1740 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1743 .features[FEAT_8000_0001_EDX] = 1744 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1745 .features[FEAT_8000_0001_ECX] = 1746 CPUID_EXT3_LAHF_LM, 1747 .features[FEAT_7_0_EDX] = 1748 CPUID_7_0_EDX_SPEC_CTRL, 1749 .features[FEAT_6_EAX] = 1750 CPUID_6_EAX_ARAT, 1751 .xlevel = 0x80000008, 1752 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)", 1753 }, 1754 { 1755 .name = "SandyBridge", 1756 .level = 0xd, 1757 .vendor = CPUID_VENDOR_INTEL, 1758 .family = 6, 1759 .model = 42, 1760 .stepping = 1, 1761 .features[FEAT_1_EDX] = 1762 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1763 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1764 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1765 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1766 CPUID_DE | CPUID_FP87, 1767 .features[FEAT_1_ECX] = 1768 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1769 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1770 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1771 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1772 CPUID_EXT_SSE3, 1773 .features[FEAT_8000_0001_EDX] = 1774 CPUID_EXT2_LM | 
CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1775 CPUID_EXT2_SYSCALL, 1776 .features[FEAT_8000_0001_ECX] = 1777 CPUID_EXT3_LAHF_LM, 1778 .features[FEAT_XSAVE] = 1779 CPUID_XSAVE_XSAVEOPT, 1780 .features[FEAT_6_EAX] = 1781 CPUID_6_EAX_ARAT, 1782 .xlevel = 0x80000008, 1783 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1784 }, 1785 { 1786 .name = "SandyBridge-IBRS", 1787 .level = 0xd, 1788 .vendor = CPUID_VENDOR_INTEL, 1789 .family = 6, 1790 .model = 42, 1791 .stepping = 1, 1792 .features[FEAT_1_EDX] = 1793 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1794 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1795 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1796 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1797 CPUID_DE | CPUID_FP87, 1798 .features[FEAT_1_ECX] = 1799 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1800 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1801 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1802 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1803 CPUID_EXT_SSE3, 1804 .features[FEAT_8000_0001_EDX] = 1805 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1806 CPUID_EXT2_SYSCALL, 1807 .features[FEAT_8000_0001_ECX] = 1808 CPUID_EXT3_LAHF_LM, 1809 .features[FEAT_7_0_EDX] = 1810 CPUID_7_0_EDX_SPEC_CTRL, 1811 .features[FEAT_XSAVE] = 1812 CPUID_XSAVE_XSAVEOPT, 1813 .features[FEAT_6_EAX] = 1814 CPUID_6_EAX_ARAT, 1815 .xlevel = 0x80000008, 1816 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)", 1817 }, 1818 { 1819 .name = "IvyBridge", 1820 .level = 0xd, 1821 .vendor = CPUID_VENDOR_INTEL, 1822 .family = 6, 1823 .model = 58, 1824 .stepping = 9, 1825 .features[FEAT_1_EDX] = 1826 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1827 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1828 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1829 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1830 CPUID_DE | 
CPUID_FP87, 1831 .features[FEAT_1_ECX] = 1832 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1833 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1834 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1835 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1836 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1837 .features[FEAT_7_0_EBX] = 1838 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1839 CPUID_7_0_EBX_ERMS, 1840 .features[FEAT_8000_0001_EDX] = 1841 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1842 CPUID_EXT2_SYSCALL, 1843 .features[FEAT_8000_0001_ECX] = 1844 CPUID_EXT3_LAHF_LM, 1845 .features[FEAT_XSAVE] = 1846 CPUID_XSAVE_XSAVEOPT, 1847 .features[FEAT_6_EAX] = 1848 CPUID_6_EAX_ARAT, 1849 .xlevel = 0x80000008, 1850 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1851 }, 1852 { 1853 .name = "IvyBridge-IBRS", 1854 .level = 0xd, 1855 .vendor = CPUID_VENDOR_INTEL, 1856 .family = 6, 1857 .model = 58, 1858 .stepping = 9, 1859 .features[FEAT_1_EDX] = 1860 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1861 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1862 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1863 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1864 CPUID_DE | CPUID_FP87, 1865 .features[FEAT_1_ECX] = 1866 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1867 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1868 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1869 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1870 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1871 .features[FEAT_7_0_EBX] = 1872 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1873 CPUID_7_0_EBX_ERMS, 1874 .features[FEAT_8000_0001_EDX] = 1875 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1876 CPUID_EXT2_SYSCALL, 1877 .features[FEAT_8000_0001_ECX] = 1878 CPUID_EXT3_LAHF_LM, 1879 .features[FEAT_7_0_EDX] = 1880 CPUID_7_0_EDX_SPEC_CTRL, 1881 .features[FEAT_XSAVE] = 1882 
CPUID_XSAVE_XSAVEOPT, 1883 .features[FEAT_6_EAX] = 1884 CPUID_6_EAX_ARAT, 1885 .xlevel = 0x80000008, 1886 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)", 1887 }, 1888 { 1889 .name = "Haswell-noTSX", 1890 .level = 0xd, 1891 .vendor = CPUID_VENDOR_INTEL, 1892 .family = 6, 1893 .model = 60, 1894 .stepping = 1, 1895 .features[FEAT_1_EDX] = 1896 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1897 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1898 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1899 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1900 CPUID_DE | CPUID_FP87, 1901 .features[FEAT_1_ECX] = 1902 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1903 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1904 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1905 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1906 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1907 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1908 .features[FEAT_8000_0001_EDX] = 1909 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1910 CPUID_EXT2_SYSCALL, 1911 .features[FEAT_8000_0001_ECX] = 1912 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1913 .features[FEAT_7_0_EBX] = 1914 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1915 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1916 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 1917 .features[FEAT_XSAVE] = 1918 CPUID_XSAVE_XSAVEOPT, 1919 .features[FEAT_6_EAX] = 1920 CPUID_6_EAX_ARAT, 1921 .xlevel = 0x80000008, 1922 .model_id = "Intel Core Processor (Haswell, no TSX)", 1923 }, 1924 { 1925 .name = "Haswell-noTSX-IBRS", 1926 .level = 0xd, 1927 .vendor = CPUID_VENDOR_INTEL, 1928 .family = 6, 1929 .model = 60, 1930 .stepping = 1, 1931 .features[FEAT_1_EDX] = 1932 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1933 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1934 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 
CPUID_CX8 | 1935 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1936 CPUID_DE | CPUID_FP87, 1937 .features[FEAT_1_ECX] = 1938 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1939 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1940 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1941 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1942 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1943 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1944 .features[FEAT_8000_0001_EDX] = 1945 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1946 CPUID_EXT2_SYSCALL, 1947 .features[FEAT_8000_0001_ECX] = 1948 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1949 .features[FEAT_7_0_EDX] = 1950 CPUID_7_0_EDX_SPEC_CTRL, 1951 .features[FEAT_7_0_EBX] = 1952 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1953 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1954 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID, 1955 .features[FEAT_XSAVE] = 1956 CPUID_XSAVE_XSAVEOPT, 1957 .features[FEAT_6_EAX] = 1958 CPUID_6_EAX_ARAT, 1959 .xlevel = 0x80000008, 1960 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)", 1961 }, 1962 { 1963 .name = "Haswell", 1964 .level = 0xd, 1965 .vendor = CPUID_VENDOR_INTEL, 1966 .family = 6, 1967 .model = 60, 1968 .stepping = 4, 1969 .features[FEAT_1_EDX] = 1970 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1971 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1972 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1973 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1974 CPUID_DE | CPUID_FP87, 1975 .features[FEAT_1_ECX] = 1976 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1977 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 1978 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1979 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 1980 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 1981 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1982 
.features[FEAT_8000_0001_EDX] = 1983 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1984 CPUID_EXT2_SYSCALL, 1985 .features[FEAT_8000_0001_ECX] = 1986 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 1987 .features[FEAT_7_0_EBX] = 1988 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 1989 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 1990 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 1991 CPUID_7_0_EBX_RTM, 1992 .features[FEAT_XSAVE] = 1993 CPUID_XSAVE_XSAVEOPT, 1994 .features[FEAT_6_EAX] = 1995 CPUID_6_EAX_ARAT, 1996 .xlevel = 0x80000008, 1997 .model_id = "Intel Core Processor (Haswell)", 1998 }, 1999 { 2000 .name = "Haswell-IBRS", 2001 .level = 0xd, 2002 .vendor = CPUID_VENDOR_INTEL, 2003 .family = 6, 2004 .model = 60, 2005 .stepping = 4, 2006 .features[FEAT_1_EDX] = 2007 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2008 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2009 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2010 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2011 CPUID_DE | CPUID_FP87, 2012 .features[FEAT_1_ECX] = 2013 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2014 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2015 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2016 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2017 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2018 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2019 .features[FEAT_8000_0001_EDX] = 2020 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2021 CPUID_EXT2_SYSCALL, 2022 .features[FEAT_8000_0001_ECX] = 2023 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2024 .features[FEAT_7_0_EDX] = 2025 CPUID_7_0_EDX_SPEC_CTRL, 2026 .features[FEAT_7_0_EBX] = 2027 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2028 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2029 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2030 CPUID_7_0_EBX_RTM, 2031 
.features[FEAT_XSAVE] = 2032 CPUID_XSAVE_XSAVEOPT, 2033 .features[FEAT_6_EAX] = 2034 CPUID_6_EAX_ARAT, 2035 .xlevel = 0x80000008, 2036 .model_id = "Intel Core Processor (Haswell, IBRS)", 2037 }, 2038 { 2039 .name = "Broadwell-noTSX", 2040 .level = 0xd, 2041 .vendor = CPUID_VENDOR_INTEL, 2042 .family = 6, 2043 .model = 61, 2044 .stepping = 2, 2045 .features[FEAT_1_EDX] = 2046 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2047 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2048 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2049 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2050 CPUID_DE | CPUID_FP87, 2051 .features[FEAT_1_ECX] = 2052 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2053 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2054 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2055 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2056 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2057 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2058 .features[FEAT_8000_0001_EDX] = 2059 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2060 CPUID_EXT2_SYSCALL, 2061 .features[FEAT_8000_0001_ECX] = 2062 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2063 .features[FEAT_7_0_EBX] = 2064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2065 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2066 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2067 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2068 CPUID_7_0_EBX_SMAP, 2069 .features[FEAT_XSAVE] = 2070 CPUID_XSAVE_XSAVEOPT, 2071 .features[FEAT_6_EAX] = 2072 CPUID_6_EAX_ARAT, 2073 .xlevel = 0x80000008, 2074 .model_id = "Intel Core Processor (Broadwell, no TSX)", 2075 }, 2076 { 2077 .name = "Broadwell-noTSX-IBRS", 2078 .level = 0xd, 2079 .vendor = CPUID_VENDOR_INTEL, 2080 .family = 6, 2081 .model = 61, 2082 .stepping = 2, 2083 .features[FEAT_1_EDX] = 2084 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 
2085 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2086 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2087 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2088 CPUID_DE | CPUID_FP87, 2089 .features[FEAT_1_ECX] = 2090 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2091 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2092 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2093 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2094 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2095 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2096 .features[FEAT_8000_0001_EDX] = 2097 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2098 CPUID_EXT2_SYSCALL, 2099 .features[FEAT_8000_0001_ECX] = 2100 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2101 .features[FEAT_7_0_EDX] = 2102 CPUID_7_0_EDX_SPEC_CTRL, 2103 .features[FEAT_7_0_EBX] = 2104 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2105 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2106 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2107 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2108 CPUID_7_0_EBX_SMAP, 2109 .features[FEAT_XSAVE] = 2110 CPUID_XSAVE_XSAVEOPT, 2111 .features[FEAT_6_EAX] = 2112 CPUID_6_EAX_ARAT, 2113 .xlevel = 0x80000008, 2114 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)", 2115 }, 2116 { 2117 .name = "Broadwell", 2118 .level = 0xd, 2119 .vendor = CPUID_VENDOR_INTEL, 2120 .family = 6, 2121 .model = 61, 2122 .stepping = 2, 2123 .features[FEAT_1_EDX] = 2124 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2125 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2126 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2127 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2128 CPUID_DE | CPUID_FP87, 2129 .features[FEAT_1_ECX] = 2130 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2131 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2132 
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2133 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2134 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2135 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2136 .features[FEAT_8000_0001_EDX] = 2137 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2138 CPUID_EXT2_SYSCALL, 2139 .features[FEAT_8000_0001_ECX] = 2140 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2141 .features[FEAT_7_0_EBX] = 2142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2143 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2144 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2145 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2146 CPUID_7_0_EBX_SMAP, 2147 .features[FEAT_XSAVE] = 2148 CPUID_XSAVE_XSAVEOPT, 2149 .features[FEAT_6_EAX] = 2150 CPUID_6_EAX_ARAT, 2151 .xlevel = 0x80000008, 2152 .model_id = "Intel Core Processor (Broadwell)", 2153 }, 2154 { 2155 .name = "Broadwell-IBRS", 2156 .level = 0xd, 2157 .vendor = CPUID_VENDOR_INTEL, 2158 .family = 6, 2159 .model = 61, 2160 .stepping = 2, 2161 .features[FEAT_1_EDX] = 2162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2166 CPUID_DE | CPUID_FP87, 2167 .features[FEAT_1_ECX] = 2168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2174 .features[FEAT_8000_0001_EDX] = 2175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2176 CPUID_EXT2_SYSCALL, 2177 .features[FEAT_8000_0001_ECX] = 2178 CPUID_EXT3_ABM | 
CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2179 .features[FEAT_7_0_EDX] = 2180 CPUID_7_0_EDX_SPEC_CTRL, 2181 .features[FEAT_7_0_EBX] = 2182 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2183 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2184 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2185 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2186 CPUID_7_0_EBX_SMAP, 2187 .features[FEAT_XSAVE] = 2188 CPUID_XSAVE_XSAVEOPT, 2189 .features[FEAT_6_EAX] = 2190 CPUID_6_EAX_ARAT, 2191 .xlevel = 0x80000008, 2192 .model_id = "Intel Core Processor (Broadwell, IBRS)", 2193 }, 2194 { 2195 .name = "Skylake-Client", 2196 .level = 0xd, 2197 .vendor = CPUID_VENDOR_INTEL, 2198 .family = 6, 2199 .model = 94, 2200 .stepping = 3, 2201 .features[FEAT_1_EDX] = 2202 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2206 CPUID_DE | CPUID_FP87, 2207 .features[FEAT_1_ECX] = 2208 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2209 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2211 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2213 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2214 .features[FEAT_8000_0001_EDX] = 2215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2216 CPUID_EXT2_SYSCALL, 2217 .features[FEAT_8000_0001_ECX] = 2218 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2219 .features[FEAT_7_0_EBX] = 2220 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2221 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2222 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2223 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2224 
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, 2225 /* Missing: XSAVES (not supported by some Linux versions, 2226 * including v4.1 to v4.12). 2227 * KVM doesn't yet expose any XSAVES state save component, 2228 * and the only one defined in Skylake (processor tracing) 2229 * probably will block migration anyway. 2230 */ 2231 .features[FEAT_XSAVE] = 2232 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2233 CPUID_XSAVE_XGETBV1, 2234 .features[FEAT_6_EAX] = 2235 CPUID_6_EAX_ARAT, 2236 .xlevel = 0x80000008, 2237 .model_id = "Intel Core Processor (Skylake)", 2238 }, 2239 { 2240 .name = "Skylake-Client-IBRS", 2241 .level = 0xd, 2242 .vendor = CPUID_VENDOR_INTEL, 2243 .family = 6, 2244 .model = 94, 2245 .stepping = 3, 2246 .features[FEAT_1_EDX] = 2247 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2248 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2249 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2250 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2251 CPUID_DE | CPUID_FP87, 2252 .features[FEAT_1_ECX] = 2253 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2254 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2255 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2256 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2257 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2258 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2259 .features[FEAT_8000_0001_EDX] = 2260 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2261 CPUID_EXT2_SYSCALL, 2262 .features[FEAT_8000_0001_ECX] = 2263 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2264 .features[FEAT_7_0_EDX] = 2265 CPUID_7_0_EDX_SPEC_CTRL, 2266 .features[FEAT_7_0_EBX] = 2267 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2268 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2269 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2270 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2271 
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX, 2272 /* Missing: XSAVES (not supported by some Linux versions, 2273 * including v4.1 to v4.12). 2274 * KVM doesn't yet expose any XSAVES state save component, 2275 * and the only one defined in Skylake (processor tracing) 2276 * probably will block migration anyway. 2277 */ 2278 .features[FEAT_XSAVE] = 2279 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2280 CPUID_XSAVE_XGETBV1, 2281 .features[FEAT_6_EAX] = 2282 CPUID_6_EAX_ARAT, 2283 .xlevel = 0x80000008, 2284 .model_id = "Intel Core Processor (Skylake, IBRS)", 2285 }, 2286 { 2287 .name = "Skylake-Server", 2288 .level = 0xd, 2289 .vendor = CPUID_VENDOR_INTEL, 2290 .family = 6, 2291 .model = 85, 2292 .stepping = 4, 2293 .features[FEAT_1_EDX] = 2294 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2295 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2296 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2297 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2298 CPUID_DE | CPUID_FP87, 2299 .features[FEAT_1_ECX] = 2300 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2301 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2302 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2303 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2304 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2305 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2306 .features[FEAT_8000_0001_EDX] = 2307 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2308 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2309 .features[FEAT_8000_0001_ECX] = 2310 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2311 .features[FEAT_7_0_EBX] = 2312 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2313 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2314 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2315 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2316 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | 
CPUID_7_0_EBX_CLWB | 2317 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2318 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2319 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2320 /* Missing: XSAVES (not supported by some Linux versions, 2321 * including v4.1 to v4.12). 2322 * KVM doesn't yet expose any XSAVES state save component, 2323 * and the only one defined in Skylake (processor tracing) 2324 * probably will block migration anyway. 2325 */ 2326 .features[FEAT_XSAVE] = 2327 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2328 CPUID_XSAVE_XGETBV1, 2329 .features[FEAT_6_EAX] = 2330 CPUID_6_EAX_ARAT, 2331 .xlevel = 0x80000008, 2332 .model_id = "Intel Xeon Processor (Skylake)", 2333 }, 2334 { 2335 .name = "Skylake-Server-IBRS", 2336 .level = 0xd, 2337 .vendor = CPUID_VENDOR_INTEL, 2338 .family = 6, 2339 .model = 85, 2340 .stepping = 4, 2341 .features[FEAT_1_EDX] = 2342 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2343 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2344 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2345 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2346 CPUID_DE | CPUID_FP87, 2347 .features[FEAT_1_ECX] = 2348 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2349 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2350 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2351 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2352 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2353 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2354 .features[FEAT_8000_0001_EDX] = 2355 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2356 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2357 .features[FEAT_8000_0001_ECX] = 2358 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2359 .features[FEAT_7_0_EDX] = 2360 CPUID_7_0_EDX_SPEC_CTRL, 2361 .features[FEAT_7_0_EBX] = 2362 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2363 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | 
CPUID_7_0_EBX_SMEP | 2364 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2365 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2366 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB | 2367 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2368 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2369 CPUID_7_0_EBX_AVX512VL, 2370 /* Missing: XSAVES (not supported by some Linux versions, 2371 * including v4.1 to v4.12). 2372 * KVM doesn't yet expose any XSAVES state save component, 2373 * and the only one defined in Skylake (processor tracing) 2374 * probably will block migration anyway. 2375 */ 2376 .features[FEAT_XSAVE] = 2377 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2378 CPUID_XSAVE_XGETBV1, 2379 .features[FEAT_6_EAX] = 2380 CPUID_6_EAX_ARAT, 2381 .xlevel = 0x80000008, 2382 .model_id = "Intel Xeon Processor (Skylake, IBRS)", 2383 }, 2384 { 2385 .name = "KnightsMill", 2386 .level = 0xd, 2387 .vendor = CPUID_VENDOR_INTEL, 2388 .family = 6, 2389 .model = 133, 2390 .stepping = 0, 2391 .features[FEAT_1_EDX] = 2392 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2393 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2394 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2395 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2396 CPUID_PSE | CPUID_DE | CPUID_FP87, 2397 .features[FEAT_1_ECX] = 2398 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2399 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2400 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2401 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2402 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2403 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2404 .features[FEAT_8000_0001_EDX] = 2405 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2406 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2407 .features[FEAT_8000_0001_ECX] = 2408 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2409 
.features[FEAT_7_0_EBX] = 2410 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2411 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2412 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2413 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2414 CPUID_7_0_EBX_AVX512ER, 2415 .features[FEAT_7_0_ECX] = 2416 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2417 .features[FEAT_7_0_EDX] = 2418 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2419 .features[FEAT_XSAVE] = 2420 CPUID_XSAVE_XSAVEOPT, 2421 .features[FEAT_6_EAX] = 2422 CPUID_6_EAX_ARAT, 2423 .xlevel = 0x80000008, 2424 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2425 }, 2426 { 2427 .name = "Opteron_G1", 2428 .level = 5, 2429 .vendor = CPUID_VENDOR_AMD, 2430 .family = 15, 2431 .model = 6, 2432 .stepping = 1, 2433 .features[FEAT_1_EDX] = 2434 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2435 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2436 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2437 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2438 CPUID_DE | CPUID_FP87, 2439 .features[FEAT_1_ECX] = 2440 CPUID_EXT_SSE3, 2441 .features[FEAT_8000_0001_EDX] = 2442 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2443 .xlevel = 0x80000008, 2444 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2445 }, 2446 { 2447 .name = "Opteron_G2", 2448 .level = 5, 2449 .vendor = CPUID_VENDOR_AMD, 2450 .family = 15, 2451 .model = 6, 2452 .stepping = 1, 2453 .features[FEAT_1_EDX] = 2454 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2455 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2456 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2457 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2458 CPUID_DE | CPUID_FP87, 2459 .features[FEAT_1_ECX] = 2460 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2461 /* Missing: CPUID_EXT2_RDTSCP */ 2462 
.features[FEAT_8000_0001_EDX] = 2463 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2464 .features[FEAT_8000_0001_ECX] = 2465 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2466 .xlevel = 0x80000008, 2467 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2468 }, 2469 { 2470 .name = "Opteron_G3", 2471 .level = 5, 2472 .vendor = CPUID_VENDOR_AMD, 2473 .family = 16, 2474 .model = 2, 2475 .stepping = 3, 2476 .features[FEAT_1_EDX] = 2477 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2478 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2479 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2480 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2481 CPUID_DE | CPUID_FP87, 2482 .features[FEAT_1_ECX] = 2483 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2484 CPUID_EXT_SSE3, 2485 /* Missing: CPUID_EXT2_RDTSCP */ 2486 .features[FEAT_8000_0001_EDX] = 2487 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2488 .features[FEAT_8000_0001_ECX] = 2489 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2490 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2491 .xlevel = 0x80000008, 2492 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2493 }, 2494 { 2495 .name = "Opteron_G4", 2496 .level = 0xd, 2497 .vendor = CPUID_VENDOR_AMD, 2498 .family = 21, 2499 .model = 1, 2500 .stepping = 2, 2501 .features[FEAT_1_EDX] = 2502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2506 CPUID_DE | CPUID_FP87, 2507 .features[FEAT_1_ECX] = 2508 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2509 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2510 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2511 CPUID_EXT_SSE3, 2512 /* Missing: CPUID_EXT2_RDTSCP */ 2513 .features[FEAT_8000_0001_EDX] = 2514 
CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2515 CPUID_EXT2_SYSCALL, 2516 .features[FEAT_8000_0001_ECX] = 2517 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2520 CPUID_EXT3_LAHF_LM, 2521 /* no xsaveopt! */ 2522 .xlevel = 0x8000001A, 2523 .model_id = "AMD Opteron 62xx class CPU", 2524 }, 2525 { 2526 .name = "Opteron_G5", 2527 .level = 0xd, 2528 .vendor = CPUID_VENDOR_AMD, 2529 .family = 21, 2530 .model = 2, 2531 .stepping = 0, 2532 .features[FEAT_1_EDX] = 2533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2537 CPUID_DE | CPUID_FP87, 2538 .features[FEAT_1_ECX] = 2539 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2540 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2541 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2542 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2543 /* Missing: CPUID_EXT2_RDTSCP */ 2544 .features[FEAT_8000_0001_EDX] = 2545 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2546 CPUID_EXT2_SYSCALL, 2547 .features[FEAT_8000_0001_ECX] = 2548 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2549 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2550 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2551 CPUID_EXT3_LAHF_LM, 2552 /* no xsaveopt! 
*/ 2553 .xlevel = 0x8000001A, 2554 .model_id = "AMD Opteron 63xx class CPU", 2555 }, 2556 { 2557 .name = "EPYC", 2558 .level = 0xd, 2559 .vendor = CPUID_VENDOR_AMD, 2560 .family = 23, 2561 .model = 1, 2562 .stepping = 2, 2563 .features[FEAT_1_EDX] = 2564 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2565 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2566 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2567 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2568 CPUID_VME | CPUID_FP87, 2569 .features[FEAT_1_ECX] = 2570 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2571 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2572 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2573 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2574 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2575 .features[FEAT_8000_0001_EDX] = 2576 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2577 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2578 CPUID_EXT2_SYSCALL, 2579 .features[FEAT_8000_0001_ECX] = 2580 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2581 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2582 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2583 CPUID_EXT3_TOPOEXT, 2584 .features[FEAT_7_0_EBX] = 2585 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2586 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2587 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2588 CPUID_7_0_EBX_SHA_NI, 2589 /* Missing: XSAVES (not supported by some Linux versions, 2590 * including v4.1 to v4.12). 2591 * KVM doesn't yet expose any XSAVES state save component. 
2592 */ 2593 .features[FEAT_XSAVE] = 2594 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2595 CPUID_XSAVE_XGETBV1, 2596 .features[FEAT_6_EAX] = 2597 CPUID_6_EAX_ARAT, 2598 .xlevel = 0x8000001E, 2599 .model_id = "AMD EPYC Processor", 2600 .cache_info = &epyc_cache_info, 2601 }, 2602 { 2603 .name = "EPYC-IBPB", 2604 .level = 0xd, 2605 .vendor = CPUID_VENDOR_AMD, 2606 .family = 23, 2607 .model = 1, 2608 .stepping = 2, 2609 .features[FEAT_1_EDX] = 2610 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2611 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2612 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2613 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2614 CPUID_VME | CPUID_FP87, 2615 .features[FEAT_1_ECX] = 2616 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2617 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2618 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2619 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2620 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2621 .features[FEAT_8000_0001_EDX] = 2622 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2623 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2624 CPUID_EXT2_SYSCALL, 2625 .features[FEAT_8000_0001_ECX] = 2626 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2627 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2628 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2629 CPUID_EXT3_TOPOEXT, 2630 .features[FEAT_8000_0008_EBX] = 2631 CPUID_8000_0008_EBX_IBPB, 2632 .features[FEAT_7_0_EBX] = 2633 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2634 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2635 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2636 CPUID_7_0_EBX_SHA_NI, 2637 /* Missing: XSAVES (not supported by some Linux versions, 2638 * including v4.1 to v4.12). 2639 * KVM doesn't yet expose any XSAVES state save component. 
 */
        .features[FEAT_XSAVE] =
                     CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
                     CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
                     CPUID_6_EAX_ARAT,
        .xlevel = 0x8000001E,
        .model_id = "AMD EPYC Processor (with IBPB)",
        .cache_info = &epyc_cache_info,
    },
};
/* end of builtin_x86_defs[] CPU model table */

/* Simple (property name, property value) string pair used for the
 * accelerator-specific default tables below.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * The table is NULL-terminated; x86_cpu_change_kvm_default() relies on
 * the { NULL, NULL } sentinel to stop scanning.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};


/* Override the default value of one entry in kvm_default_props.
 * @prop must already exist in the table; passing an unknown property
 * name trips the assertion below (the terminator's ->prop is NULL).
 * Note: @value is stored by pointer, not copied, so it must outlive
 * any later use of the table.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

/* Forward declaration; defined later in this file. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/* Return true if the host's KVM reports Local MCE capability
 * (MCG_LMCE_P bit in KVM_X86_GET_MCE_CAP_SUPPORTED).
 * Without CONFIG_KVM, mce_cap stays 0 and this always returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

/* 3 CPUID leaves (0x80000002..4) x 16 bytes of register data each */
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    /* Each of leaves 0x80000002..0x80000004 yields 16 bytes of the
     * brand string, packed EAX/EBX/ECX/EDX in ascending address order.
     */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

/* qdev properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init for the "max" CPU model: enables every feature the current
 * accelerator supports on this host.
 */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* ordering controls listing position in "-cpu help" output */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}

static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);

/* Instance init for the "max" CPU model.  When the accelerator uses host
 * CPUID (KVM/HVF), copy vendor/family/model/stepping/model-id from the
 * host CPU; otherwise (TCG) fall back to fixed QEMU defaults.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        /* buffer is CPUID_MODEL_ID_SZ + 1 and zero-initialized, so the
         * string is NUL-terminated even though the helper does not add one
         */
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        /* Query the accelerator's minimum CPUID levels; the else branch is
         * reached only under HVF, since accel_uses_host_cpuid() was true
         * and kvm_enabled() is false.
         */
        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG path: fixed synthetic identification */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for the "host" CPU model, a thin child of "max" that
 * additionally requires host CPUID passthrough.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

    if (kvm_enabled()) {
        xcc->model_description =
            "KVM processor with all supported host features ";
    } else if (hvf_enabled()) {
        xcc->model_description =
            "HVF processor with all supported host features ";
    }
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    /* "host" inherits instance_init (and thus behavior) from "max" */
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

/* Emit one warning per bit set in @mask for feature word @w, naming each
 * CPUID feature the accelerator cannot provide.
 */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            assert(reg);
            /* feat_names[i] may be NULL for unnamed bits; print only the
             * "CPUID.xxH:REG [bit N]" part in that case
             */
            warn_report("%s doesn't support requested feature: "
                        "CPUID.%02XH:%s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        f->cpuid_eax, reg,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}

/* QOM getter for the "family" property: base family from
 * CPUID[1].EAX bits 11:8, plus the extended family (bits 27:20)
 * when the base family is 0xf.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for the "family" property; inverse of the getter above.
 * Accepts 0 .. 0xff + 0xf (max extended + base family).
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ?
name : "null", value, min, max); 2911 return; 2912 } 2913 2914 env->cpuid_version &= ~0xff00f00; 2915 if (value > 0x0f) { 2916 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 2917 } else { 2918 env->cpuid_version |= value << 8; 2919 } 2920 } 2921 2922 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 2923 const char *name, void *opaque, 2924 Error **errp) 2925 { 2926 X86CPU *cpu = X86_CPU(obj); 2927 CPUX86State *env = &cpu->env; 2928 int64_t value; 2929 2930 value = (env->cpuid_version >> 4) & 0xf; 2931 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 2932 visit_type_int(v, name, &value, errp); 2933 } 2934 2935 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 2936 const char *name, void *opaque, 2937 Error **errp) 2938 { 2939 X86CPU *cpu = X86_CPU(obj); 2940 CPUX86State *env = &cpu->env; 2941 const int64_t min = 0; 2942 const int64_t max = 0xff; 2943 Error *local_err = NULL; 2944 int64_t value; 2945 2946 visit_type_int(v, name, &value, &local_err); 2947 if (local_err) { 2948 error_propagate(errp, local_err); 2949 return; 2950 } 2951 if (value < min || value > max) { 2952 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 2953 name ? 
name : "null", value, min, max); 2954 return; 2955 } 2956 2957 env->cpuid_version &= ~0xf00f0; 2958 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 2959 } 2960 2961 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 2962 const char *name, void *opaque, 2963 Error **errp) 2964 { 2965 X86CPU *cpu = X86_CPU(obj); 2966 CPUX86State *env = &cpu->env; 2967 int64_t value; 2968 2969 value = env->cpuid_version & 0xf; 2970 visit_type_int(v, name, &value, errp); 2971 } 2972 2973 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 2974 const char *name, void *opaque, 2975 Error **errp) 2976 { 2977 X86CPU *cpu = X86_CPU(obj); 2978 CPUX86State *env = &cpu->env; 2979 const int64_t min = 0; 2980 const int64_t max = 0xf; 2981 Error *local_err = NULL; 2982 int64_t value; 2983 2984 visit_type_int(v, name, &value, &local_err); 2985 if (local_err) { 2986 error_propagate(errp, local_err); 2987 return; 2988 } 2989 if (value < min || value > max) { 2990 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 2991 name ? 
                   name : "null", value, min, max);
        return;
    }

    /* Stepping is CPUID[1].EAX bits 3:0 */
    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}

/* QOM getter for "vendor": returns the 12-character CPUID vendor string
 * rebuilt from the three vendor registers (CPUID[0].EBX/EDX/ECX).
 * The caller owns and must g_free() the returned buffer.
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

/* QOM setter for "vendor": @value must be exactly CPUID_VENDOR_SZ (12)
 * characters; anything else is rejected with QERR_PROPERTY_VALUE_BAD.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        /* byte i of each 4-byte group lands in bits 8*i..8*i+7 of the
         * corresponding vendor register (little-endian packing)
         */
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}

/* QOM getter for "model-id": decodes the 48-byte model-id string from the
 * cpuid_model[] word array (4 bytes per element, low byte first).
 * The caller owns and must g_free() the returned buffer.
 */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

/* QOM setter for "model-id": packs @model_id into cpuid_model[],
 * NUL-padding out to the full 48 bytes.  NULL is treated as "".
 * Input longer than 48 bytes is silently truncated here; overly long
 * built-in model_ids are caught by an assert at registration time.
 */
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

/* QOM getter for "tsc-frequency": reports env.tsc_khz scaled up to Hz */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "tsc-frequency": accepts a value in Hz and stores kHz in
 * both tsc_khz and user_tsc_khz (the latter presumably records that the
 * frequency was explicitly user-requested — verify against consumers).
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    /* NOTE(review): value > INT64_MAX can never hold for an int64_t, so
     * this range check only rejects negative values in practice.
     */
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the uint32_t feature-word array to expose */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Both arrays live on the stack; the visitor serializes them before
     * this function returns, so no heap allocation is needed.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v,
"feature-words", &list, errp); 3130 } 3131 3132 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name, 3133 void *opaque, Error **errp) 3134 { 3135 X86CPU *cpu = X86_CPU(obj); 3136 int64_t value = cpu->hyperv_spinlock_attempts; 3137 3138 visit_type_int(v, name, &value, errp); 3139 } 3140 3141 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name, 3142 void *opaque, Error **errp) 3143 { 3144 const int64_t min = 0xFFF; 3145 const int64_t max = UINT_MAX; 3146 X86CPU *cpu = X86_CPU(obj); 3147 Error *err = NULL; 3148 int64_t value; 3149 3150 visit_type_int(v, name, &value, &err); 3151 if (err) { 3152 error_propagate(errp, err); 3153 return; 3154 } 3155 3156 if (value < min || value > max) { 3157 error_setg(errp, "Property %s.%s doesn't take value %" PRId64 3158 " (minimum: %" PRId64 ", maximum: %" PRId64 ")", 3159 object_get_typename(obj), name ? name : "null", 3160 value, min, max); 3161 return; 3162 } 3163 cpu->hyperv_spinlock_attempts = value; 3164 } 3165 3166 static const PropertyInfo qdev_prop_spinlocks = { 3167 .name = "int", 3168 .get = x86_get_hv_spinlocks, 3169 .set = x86_set_hv_spinlocks, 3170 }; 3171 3172 /* Convert all '_' in a feature string option name to '-', to make feature 3173 * name conform to QOM property naming rule, which uses '-' instead of '_'. 3174 */ 3175 static inline void feat2prop(char *s) 3176 { 3177 while ((s = strchr(s, '_'))) { 3178 *s = '-'; 3179 } 3180 } 3181 3182 /* Return the feature property name for a feature flag bit */ 3183 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 3184 { 3185 /* XSAVE components are automatically enabled by other features, 3186 * so return the original feature name instead 3187 */ 3188 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 3189 int comp = (w == FEAT_XSAVE_COMP_HI) ? 
bitnr + 32 : bitnr; 3190 3191 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 3192 x86_ext_save_areas[comp].bits) { 3193 w = x86_ext_save_areas[comp].feature; 3194 bitnr = ctz32(x86_ext_save_areas[comp].bits); 3195 } 3196 } 3197 3198 assert(bitnr < 32); 3199 assert(w < FEATURE_WORDS); 3200 return feature_word_info[w].feat_names[bitnr]; 3201 } 3202 3203 /* Compatibily hack to maintain legacy +-feat semantic, 3204 * where +-feat overwrites any feature set by 3205 * feat=on|feat even if the later is parsed after +-feat 3206 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 3207 */ 3208 static GList *plus_features, *minus_features; 3209 3210 static gint compare_string(gconstpointer a, gconstpointer b) 3211 { 3212 return g_strcmp0(a, b); 3213 } 3214 3215 /* Parse "+feature,-feature,feature=foo" CPU feature string 3216 */ 3217 static void x86_cpu_parse_featurestr(const char *typename, char *features, 3218 Error **errp) 3219 { 3220 char *featurestr; /* Single 'key=value" string being parsed */ 3221 static bool cpu_globals_initialized; 3222 bool ambiguous = false; 3223 3224 if (cpu_globals_initialized) { 3225 return; 3226 } 3227 cpu_globals_initialized = true; 3228 3229 if (!features) { 3230 return; 3231 } 3232 3233 for (featurestr = strtok(features, ","); 3234 featurestr; 3235 featurestr = strtok(NULL, ",")) { 3236 const char *name; 3237 const char *val = NULL; 3238 char *eq = NULL; 3239 char num[32]; 3240 GlobalProperty *prop; 3241 3242 /* Compatibility syntax: */ 3243 if (featurestr[0] == '+') { 3244 plus_features = g_list_append(plus_features, 3245 g_strdup(featurestr + 1)); 3246 continue; 3247 } else if (featurestr[0] == '-') { 3248 minus_features = g_list_append(minus_features, 3249 g_strdup(featurestr + 1)); 3250 continue; 3251 } 3252 3253 eq = strchr(featurestr, '='); 3254 if (eq) { 3255 *eq++ = 0; 3256 val = eq; 3257 } else { 3258 val = "on"; 3259 } 3260 3261 feat2prop(featurestr); 3262 name = featurestr; 3263 3264 if (g_list_find_custom(plus_features, 
name, compare_string)) { 3265 warn_report("Ambiguous CPU model string. " 3266 "Don't mix both \"+%s\" and \"%s=%s\"", 3267 name, name, val); 3268 ambiguous = true; 3269 } 3270 if (g_list_find_custom(minus_features, name, compare_string)) { 3271 warn_report("Ambiguous CPU model string. " 3272 "Don't mix both \"-%s\" and \"%s=%s\"", 3273 name, name, val); 3274 ambiguous = true; 3275 } 3276 3277 /* Special case: */ 3278 if (!strcmp(name, "tsc-freq")) { 3279 int ret; 3280 uint64_t tsc_freq; 3281 3282 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 3283 if (ret < 0 || tsc_freq > INT64_MAX) { 3284 error_setg(errp, "bad numerical value %s", val); 3285 return; 3286 } 3287 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 3288 val = num; 3289 name = "tsc-frequency"; 3290 } 3291 3292 prop = g_new0(typeof(*prop), 1); 3293 prop->driver = typename; 3294 prop->property = g_strdup(name); 3295 prop->value = g_strdup(val); 3296 prop->errp = &error_fatal; 3297 qdev_prop_register_global(prop); 3298 } 3299 3300 if (ambiguous) { 3301 warn_report("Compatibility of ambiguous CPU model " 3302 "strings won't be kept on future QEMU versions"); 3303 } 3304 } 3305 3306 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 3307 static int x86_cpu_filter_features(X86CPU *cpu); 3308 3309 /* Check for missing features that may prevent the CPU class from 3310 * running using the current machine and accelerator. 
3311 */ 3312 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 3313 strList **missing_feats) 3314 { 3315 X86CPU *xc; 3316 FeatureWord w; 3317 Error *err = NULL; 3318 strList **next = missing_feats; 3319 3320 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 3321 strList *new = g_new0(strList, 1); 3322 new->value = g_strdup("kvm"); 3323 *missing_feats = new; 3324 return; 3325 } 3326 3327 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3328 3329 x86_cpu_expand_features(xc, &err); 3330 if (err) { 3331 /* Errors at x86_cpu_expand_features should never happen, 3332 * but in case it does, just report the model as not 3333 * runnable at all using the "type" property. 3334 */ 3335 strList *new = g_new0(strList, 1); 3336 new->value = g_strdup("type"); 3337 *next = new; 3338 next = &new->next; 3339 } 3340 3341 x86_cpu_filter_features(xc); 3342 3343 for (w = 0; w < FEATURE_WORDS; w++) { 3344 uint32_t filtered = xc->filtered_features[w]; 3345 int i; 3346 for (i = 0; i < 32; i++) { 3347 if (filtered & (1UL << i)) { 3348 strList *new = g_new0(strList, 1); 3349 new->value = g_strdup(x86_cpu_feature_name(w, i)); 3350 *next = new; 3351 next = &new->next; 3352 } 3353 } 3354 } 3355 3356 object_unref(OBJECT(xc)); 3357 } 3358 3359 /* Print all cpuid feature names in featureset 3360 */ 3361 static void listflags(FILE *f, fprintf_function print, GList *features) 3362 { 3363 size_t len = 0; 3364 GList *tmp; 3365 3366 for (tmp = features; tmp; tmp = tmp->next) { 3367 const char *name = tmp->data; 3368 if ((len + strlen(name) + 1) >= 75) { 3369 print(f, "\n"); 3370 len = 0; 3371 } 3372 print(f, "%s%s", len == 0 ? " " : " ", name); 3373 len += strlen(name) + 1; 3374 } 3375 print(f, "\n"); 3376 } 3377 3378 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ 3379 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 3380 { 3381 ObjectClass *class_a = (ObjectClass *)a; 3382 ObjectClass *class_b = (ObjectClass *)b; 3383 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 3384 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 3385 char *name_a, *name_b; 3386 int ret; 3387 3388 if (cc_a->ordering != cc_b->ordering) { 3389 ret = cc_a->ordering - cc_b->ordering; 3390 } else { 3391 name_a = x86_cpu_class_get_model_name(cc_a); 3392 name_b = x86_cpu_class_get_model_name(cc_b); 3393 ret = strcmp(name_a, name_b); 3394 g_free(name_a); 3395 g_free(name_b); 3396 } 3397 return ret; 3398 } 3399 3400 static GSList *get_sorted_cpu_model_list(void) 3401 { 3402 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 3403 list = g_slist_sort(list, x86_cpu_list_compare); 3404 return list; 3405 } 3406 3407 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 3408 { 3409 ObjectClass *oc = data; 3410 X86CPUClass *cc = X86_CPU_CLASS(oc); 3411 CPUListState *s = user_data; 3412 char *name = x86_cpu_class_get_model_name(cc); 3413 const char *desc = cc->model_description; 3414 if (!desc && cc->cpu_def) { 3415 desc = cc->cpu_def->model_id; 3416 } 3417 3418 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n", 3419 name, desc); 3420 g_free(name); 3421 } 3422 3423 /* list available CPU models and flags */ 3424 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf) 3425 { 3426 int i, j; 3427 CPUListState s = { 3428 .file = f, 3429 .cpu_fprintf = cpu_fprintf, 3430 }; 3431 GSList *list; 3432 GList *names = NULL; 3433 3434 (*cpu_fprintf)(f, "Available CPUs:\n"); 3435 list = get_sorted_cpu_model_list(); 3436 g_slist_foreach(list, x86_cpu_list_entry, &s); 3437 g_slist_free(list); 3438 3439 names = NULL; 3440 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 3441 FeatureWordInfo *fw = &feature_word_info[i]; 3442 for (j = 0; j < 32; j++) { 3443 if (fw->feat_names[j]) { 3444 names = g_list_append(names, (gpointer)fw->feat_names[j]); 3445 } 
3446 } 3447 } 3448 3449 names = g_list_sort(names, (GCompareFunc)strcmp); 3450 3451 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n"); 3452 listflags(f, cpu_fprintf, names); 3453 (*cpu_fprintf)(f, "\n"); 3454 g_list_free(names); 3455 } 3456 3457 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 3458 { 3459 ObjectClass *oc = data; 3460 X86CPUClass *cc = X86_CPU_CLASS(oc); 3461 CpuDefinitionInfoList **cpu_list = user_data; 3462 CpuDefinitionInfoList *entry; 3463 CpuDefinitionInfo *info; 3464 3465 info = g_malloc0(sizeof(*info)); 3466 info->name = x86_cpu_class_get_model_name(cc); 3467 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 3468 info->has_unavailable_features = true; 3469 info->q_typename = g_strdup(object_class_get_name(oc)); 3470 info->migration_safe = cc->migration_safe; 3471 info->has_migration_safe = true; 3472 info->q_static = cc->static_model; 3473 3474 entry = g_malloc0(sizeof(*entry)); 3475 entry->value = info; 3476 entry->next = *cpu_list; 3477 *cpu_list = entry; 3478 } 3479 3480 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 3481 { 3482 CpuDefinitionInfoList *cpu_list = NULL; 3483 GSList *list = get_sorted_cpu_model_list(); 3484 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 3485 g_slist_free(list); 3486 return cpu_list; 3487 } 3488 3489 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 3490 bool migratable_only) 3491 { 3492 FeatureWordInfo *wi = &feature_word_info[w]; 3493 uint32_t r; 3494 3495 if (kvm_enabled()) { 3496 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax, 3497 wi->cpuid_ecx, 3498 wi->cpuid_reg); 3499 } else if (hvf_enabled()) { 3500 r = hvf_get_supported_cpuid(wi->cpuid_eax, 3501 wi->cpuid_ecx, 3502 wi->cpuid_reg); 3503 } else if (tcg_enabled()) { 3504 r = wi->tcg_features; 3505 } else { 3506 return ~0; 3507 } 3508 if (migratable_only) { 3509 r &= x86_cpu_get_migratable_flags(w); 3510 } 3511 return r; 3512 } 3513 3514 static void 
x86_cpu_report_filtered_features(X86CPU *cpu) 3515 { 3516 FeatureWord w; 3517 3518 for (w = 0; w < FEATURE_WORDS; w++) { 3519 report_unavailable_features(w, cpu->filtered_features[w]); 3520 } 3521 } 3522 3523 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 3524 { 3525 PropValue *pv; 3526 for (pv = props; pv->prop; pv++) { 3527 if (!pv->value) { 3528 continue; 3529 } 3530 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 3531 &error_abort); 3532 } 3533 } 3534 3535 /* Load data from X86CPUDefinition into a X86CPU object 3536 */ 3537 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp) 3538 { 3539 CPUX86State *env = &cpu->env; 3540 const char *vendor; 3541 char host_vendor[CPUID_VENDOR_SZ + 1]; 3542 FeatureWord w; 3543 3544 /*NOTE: any property set by this function should be returned by 3545 * x86_cpu_static_props(), so static expansion of 3546 * query-cpu-model-expansion is always complete. 3547 */ 3548 3549 /* CPU models only set _minimum_ values for level/xlevel: */ 3550 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp); 3551 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp); 3552 3553 object_property_set_int(OBJECT(cpu), def->family, "family", errp); 3554 object_property_set_int(OBJECT(cpu), def->model, "model", errp); 3555 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); 3556 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); 3557 for (w = 0; w < FEATURE_WORDS; w++) { 3558 env->features[w] = def->features[w]; 3559 } 3560 3561 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 3562 cpu->legacy_cache = !def->cache_info; 3563 3564 /* Special cases not set in the X86CPUDefinition structs: */ 3565 /* TODO: in-kernel irqchip for hvf */ 3566 if (kvm_enabled()) { 3567 if (!kvm_irqchip_in_kernel()) { 3568 x86_cpu_change_kvm_default("x2apic", "off"); 3569 } 3570 3571 x86_cpu_apply_props(cpu, kvm_default_props); 3572 } else if 
(tcg_enabled()) { 3573 x86_cpu_apply_props(cpu, tcg_default_props); 3574 } 3575 3576 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 3577 3578 /* sysenter isn't supported in compatibility mode on AMD, 3579 * syscall isn't supported in compatibility mode on Intel. 3580 * Normally we advertise the actual CPU vendor, but you can 3581 * override this using the 'vendor' property if you want to use 3582 * KVM's sysenter/syscall emulation in compatibility mode and 3583 * when doing cross vendor migration 3584 */ 3585 vendor = def->vendor; 3586 if (accel_uses_host_cpuid()) { 3587 uint32_t ebx = 0, ecx = 0, edx = 0; 3588 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 3589 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 3590 vendor = host_vendor; 3591 } 3592 3593 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp); 3594 3595 } 3596 3597 /* Return a QDict containing keys for all properties that can be included 3598 * in static expansion of CPU models. All properties set by x86_cpu_load_def() 3599 * must be included in the dictionary. 3600 */ 3601 static QDict *x86_cpu_static_props(void) 3602 { 3603 FeatureWord w; 3604 int i; 3605 static const char *props[] = { 3606 "min-level", 3607 "min-xlevel", 3608 "family", 3609 "model", 3610 "stepping", 3611 "model-id", 3612 "vendor", 3613 "lmce", 3614 NULL, 3615 }; 3616 static QDict *d; 3617 3618 if (d) { 3619 return d; 3620 } 3621 3622 d = qdict_new(); 3623 for (i = 0; props[i]; i++) { 3624 qdict_put_null(d, props[i]); 3625 } 3626 3627 for (w = 0; w < FEATURE_WORDS; w++) { 3628 FeatureWordInfo *fi = &feature_word_info[w]; 3629 int bit; 3630 for (bit = 0; bit < 32; bit++) { 3631 if (!fi->feat_names[bit]) { 3632 continue; 3633 } 3634 qdict_put_null(d, fi->feat_names[bit]); 3635 } 3636 } 3637 3638 return d; 3639 } 3640 3641 /* Add an entry to @props dict, with the value for property. 
*/ 3642 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 3643 { 3644 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 3645 &error_abort); 3646 3647 qdict_put_obj(props, prop, value); 3648 } 3649 3650 /* Convert CPU model data from X86CPU object to a property dictionary 3651 * that can recreate exactly the same CPU model. 3652 */ 3653 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 3654 { 3655 QDict *sprops = x86_cpu_static_props(); 3656 const QDictEntry *e; 3657 3658 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 3659 const char *prop = qdict_entry_key(e); 3660 x86_cpu_expand_prop(cpu, props, prop); 3661 } 3662 } 3663 3664 /* Convert CPU model data from X86CPU object to a property dictionary 3665 * that can recreate exactly the same CPU model, including every 3666 * writeable QOM property. 3667 */ 3668 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 3669 { 3670 ObjectPropertyIterator iter; 3671 ObjectProperty *prop; 3672 3673 object_property_iter_init(&iter, OBJECT(cpu)); 3674 while ((prop = object_property_iter_next(&iter))) { 3675 /* skip read-only or write-only properties */ 3676 if (!prop->get || !prop->set) { 3677 continue; 3678 } 3679 3680 /* "hotplugged" is the only property that is configurable 3681 * on the command-line but will be set differently on CPUs 3682 * created using "-cpu ... -smp ..." and by CPUs created 3683 * on the fly by x86_cpu_from_model() for querying. Skip it. 
3684 */ 3685 if (!strcmp(prop->name, "hotplugged")) { 3686 continue; 3687 } 3688 x86_cpu_expand_prop(cpu, props, prop->name); 3689 } 3690 } 3691 3692 static void object_apply_props(Object *obj, QDict *props, Error **errp) 3693 { 3694 const QDictEntry *prop; 3695 Error *err = NULL; 3696 3697 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 3698 object_property_set_qobject(obj, qdict_entry_value(prop), 3699 qdict_entry_key(prop), &err); 3700 if (err) { 3701 break; 3702 } 3703 } 3704 3705 error_propagate(errp, err); 3706 } 3707 3708 /* Create X86CPU object according to model+props specification */ 3709 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 3710 { 3711 X86CPU *xc = NULL; 3712 X86CPUClass *xcc; 3713 Error *err = NULL; 3714 3715 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 3716 if (xcc == NULL) { 3717 error_setg(&err, "CPU model '%s' not found", model); 3718 goto out; 3719 } 3720 3721 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3722 if (props) { 3723 object_apply_props(OBJECT(xc), props, &err); 3724 if (err) { 3725 goto out; 3726 } 3727 } 3728 3729 x86_cpu_expand_features(xc, &err); 3730 if (err) { 3731 goto out; 3732 } 3733 3734 out: 3735 if (err) { 3736 error_propagate(errp, err); 3737 object_unref(OBJECT(xc)); 3738 xc = NULL; 3739 } 3740 return xc; 3741 } 3742 3743 CpuModelExpansionInfo * 3744 arch_query_cpu_model_expansion(CpuModelExpansionType type, 3745 CpuModelInfo *model, 3746 Error **errp) 3747 { 3748 X86CPU *xc = NULL; 3749 Error *err = NULL; 3750 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 3751 QDict *props = NULL; 3752 const char *base_name; 3753 3754 xc = x86_cpu_from_model(model->name, 3755 model->has_props ? 
3756 qobject_to(QDict, model->props) : 3757 NULL, &err); 3758 if (err) { 3759 goto out; 3760 } 3761 3762 props = qdict_new(); 3763 3764 switch (type) { 3765 case CPU_MODEL_EXPANSION_TYPE_STATIC: 3766 /* Static expansion will be based on "base" only */ 3767 base_name = "base"; 3768 x86_cpu_to_dict(xc, props); 3769 break; 3770 case CPU_MODEL_EXPANSION_TYPE_FULL: 3771 /* As we don't return every single property, full expansion needs 3772 * to keep the original model name+props, and add extra 3773 * properties on top of that. 3774 */ 3775 base_name = model->name; 3776 x86_cpu_to_dict_full(xc, props); 3777 break; 3778 default: 3779 error_setg(&err, "Unsupportted expansion type"); 3780 goto out; 3781 } 3782 3783 if (!props) { 3784 props = qdict_new(); 3785 } 3786 x86_cpu_to_dict(xc, props); 3787 3788 ret->model = g_new0(CpuModelInfo, 1); 3789 ret->model->name = g_strdup(base_name); 3790 ret->model->props = QOBJECT(props); 3791 ret->model->has_props = true; 3792 3793 out: 3794 object_unref(OBJECT(xc)); 3795 if (err) { 3796 error_propagate(errp, err); 3797 qapi_free_CpuModelExpansionInfo(ret); 3798 ret = NULL; 3799 } 3800 return ret; 3801 } 3802 3803 static gchar *x86_gdb_arch_name(CPUState *cs) 3804 { 3805 #ifdef TARGET_X86_64 3806 return g_strdup("i386:x86-64"); 3807 #else 3808 return g_strdup("i386"); 3809 #endif 3810 } 3811 3812 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 3813 { 3814 X86CPUDefinition *cpudef = data; 3815 X86CPUClass *xcc = X86_CPU_CLASS(oc); 3816 3817 xcc->cpu_def = cpudef; 3818 xcc->migration_safe = true; 3819 } 3820 3821 static void x86_register_cpudef_type(X86CPUDefinition *def) 3822 { 3823 char *typename = x86_cpu_type_name(def->name); 3824 TypeInfo ti = { 3825 .name = typename, 3826 .parent = TYPE_X86_CPU, 3827 .class_init = x86_cpu_cpudef_class_init, 3828 .class_data = def, 3829 }; 3830 3831 /* AMD aliases are handled at runtime based on CPUID vendor, so 3832 * they shouldn't be set on the CPU model table. 
3833 */ 3834 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 3835 /* catch mistakes instead of silently truncating model_id when too long */ 3836 assert(def->model_id && strlen(def->model_id) <= 48); 3837 3838 3839 type_register(&ti); 3840 g_free(typename); 3841 } 3842 3843 #if !defined(CONFIG_USER_ONLY) 3844 3845 void cpu_clear_apic_feature(CPUX86State *env) 3846 { 3847 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 3848 } 3849 3850 #endif /* !CONFIG_USER_ONLY */ 3851 3852 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 3853 uint32_t *eax, uint32_t *ebx, 3854 uint32_t *ecx, uint32_t *edx) 3855 { 3856 X86CPU *cpu = x86_env_get_cpu(env); 3857 CPUState *cs = CPU(cpu); 3858 uint32_t pkg_offset; 3859 uint32_t limit; 3860 uint32_t signature[3]; 3861 3862 /* Calculate & apply limits for different index ranges */ 3863 if (index >= 0xC0000000) { 3864 limit = env->cpuid_xlevel2; 3865 } else if (index >= 0x80000000) { 3866 limit = env->cpuid_xlevel; 3867 } else if (index >= 0x40000000) { 3868 limit = 0x40000001; 3869 } else { 3870 limit = env->cpuid_level; 3871 } 3872 3873 if (index > limit) { 3874 /* Intel documentation states that invalid EAX input will 3875 * return the same information as EAX=cpuid_level 3876 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 3877 */ 3878 index = env->cpuid_level; 3879 } 3880 3881 switch(index) { 3882 case 0: 3883 *eax = env->cpuid_level; 3884 *ebx = env->cpuid_vendor1; 3885 *edx = env->cpuid_vendor2; 3886 *ecx = env->cpuid_vendor3; 3887 break; 3888 case 1: 3889 *eax = env->cpuid_version; 3890 *ebx = (cpu->apic_id << 24) | 3891 8 << 8; /* CLFLUSH size in quad words, Linux wants it. 
*/ 3892 *ecx = env->features[FEAT_1_ECX]; 3893 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 3894 *ecx |= CPUID_EXT_OSXSAVE; 3895 } 3896 *edx = env->features[FEAT_1_EDX]; 3897 if (cs->nr_cores * cs->nr_threads > 1) { 3898 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 3899 *edx |= CPUID_HT; 3900 } 3901 break; 3902 case 2: 3903 /* cache info: needed for Pentium Pro compatibility */ 3904 if (cpu->cache_info_passthrough) { 3905 host_cpuid(index, 0, eax, ebx, ecx, edx); 3906 break; 3907 } 3908 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 3909 *ebx = 0; 3910 if (!cpu->enable_l3_cache) { 3911 *ecx = 0; 3912 } else { 3913 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 3914 } 3915 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 3916 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 3917 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 3918 break; 3919 case 4: 3920 /* cache info: needed for Core compatibility */ 3921 if (cpu->cache_info_passthrough) { 3922 host_cpuid(index, count, eax, ebx, ecx, edx); 3923 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
*/ 3924 *eax &= ~0xFC000000; 3925 if ((*eax & 31) && cs->nr_cores > 1) { 3926 *eax |= (cs->nr_cores - 1) << 26; 3927 } 3928 } else { 3929 *eax = 0; 3930 switch (count) { 3931 case 0: /* L1 dcache info */ 3932 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 3933 1, cs->nr_cores, 3934 eax, ebx, ecx, edx); 3935 break; 3936 case 1: /* L1 icache info */ 3937 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 3938 1, cs->nr_cores, 3939 eax, ebx, ecx, edx); 3940 break; 3941 case 2: /* L2 cache info */ 3942 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 3943 cs->nr_threads, cs->nr_cores, 3944 eax, ebx, ecx, edx); 3945 break; 3946 case 3: /* L3 cache info */ 3947 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); 3948 if (cpu->enable_l3_cache) { 3949 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 3950 (1 << pkg_offset), cs->nr_cores, 3951 eax, ebx, ecx, edx); 3952 break; 3953 } 3954 /* fall through */ 3955 default: /* end of info */ 3956 *eax = *ebx = *ecx = *edx = 0; 3957 break; 3958 } 3959 } 3960 break; 3961 case 5: 3962 /* mwait info: needed for Core compatibility */ 3963 *eax = 0; /* Smallest monitor-line size in bytes */ 3964 *ebx = 0; /* Largest monitor-line size in bytes */ 3965 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 3966 *edx = 0; 3967 break; 3968 case 6: 3969 /* Thermal and Power Leaf */ 3970 *eax = env->features[FEAT_6_EAX]; 3971 *ebx = 0; 3972 *ecx = 0; 3973 *edx = 0; 3974 break; 3975 case 7: 3976 /* Structured Extended Feature Flags Enumeration Leaf */ 3977 if (count == 0) { 3978 *eax = 0; /* Maximum ECX value for sub-leaves */ 3979 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 3980 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 3981 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 3982 *ecx |= CPUID_7_0_ECX_OSPKE; 3983 } 3984 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 3985 } else { 3986 *eax = 0; 3987 *ebx = 0; 3988 *ecx = 0; 3989 *edx = 0; 3990 } 3991 break; 3992 case 9: 3993 /* 
Direct Cache Access Information Leaf */ 3994 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 3995 *ebx = 0; 3996 *ecx = 0; 3997 *edx = 0; 3998 break; 3999 case 0xA: 4000 /* Architectural Performance Monitoring Leaf */ 4001 if (kvm_enabled() && cpu->enable_pmu) { 4002 KVMState *s = cs->kvm_state; 4003 4004 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 4005 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 4006 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 4007 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 4008 } else if (hvf_enabled() && cpu->enable_pmu) { 4009 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 4010 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 4011 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 4012 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 4013 } else { 4014 *eax = 0; 4015 *ebx = 0; 4016 *ecx = 0; 4017 *edx = 0; 4018 } 4019 break; 4020 case 0xB: 4021 /* Extended Topology Enumeration Leaf */ 4022 if (!cpu->enable_cpuid_0xb) { 4023 *eax = *ebx = *ecx = *edx = 0; 4024 break; 4025 } 4026 4027 *ecx = count & 0xff; 4028 *edx = cpu->apic_id; 4029 4030 switch (count) { 4031 case 0: 4032 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads); 4033 *ebx = cs->nr_threads; 4034 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 4035 break; 4036 case 1: 4037 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads); 4038 *ebx = cs->nr_cores * cs->nr_threads; 4039 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 4040 break; 4041 default: 4042 *eax = 0; 4043 *ebx = 0; 4044 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 4045 } 4046 4047 assert(!(*eax & ~0x1f)); 4048 *ebx &= 0xffff; /* The count doesn't need to be reliable. 
*/ 4049 break; 4050 case 0xD: { 4051 /* Processor Extended State */ 4052 *eax = 0; 4053 *ebx = 0; 4054 *ecx = 0; 4055 *edx = 0; 4056 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 4057 break; 4058 } 4059 4060 if (count == 0) { 4061 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 4062 *eax = env->features[FEAT_XSAVE_COMP_LO]; 4063 *edx = env->features[FEAT_XSAVE_COMP_HI]; 4064 *ebx = *ecx; 4065 } else if (count == 1) { 4066 *eax = env->features[FEAT_XSAVE]; 4067 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 4068 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 4069 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 4070 *eax = esa->size; 4071 *ebx = esa->offset; 4072 } 4073 } 4074 break; 4075 } 4076 case 0x14: { 4077 /* Intel Processor Trace Enumeration */ 4078 *eax = 0; 4079 *ebx = 0; 4080 *ecx = 0; 4081 *edx = 0; 4082 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 4083 !kvm_enabled()) { 4084 break; 4085 } 4086 4087 if (count == 0) { 4088 *eax = INTEL_PT_MAX_SUBLEAF; 4089 *ebx = INTEL_PT_MINIMAL_EBX; 4090 *ecx = INTEL_PT_MINIMAL_ECX; 4091 } else if (count == 1) { 4092 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 4093 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 4094 } 4095 break; 4096 } 4097 case 0x40000000: 4098 /* 4099 * CPUID code in kvm_arch_init_vcpu() ignores stuff 4100 * set here, but we restrict to TCG none the less. 
4101 */ 4102 if (tcg_enabled() && cpu->expose_tcg) { 4103 memcpy(signature, "TCGTCGTCGTCG", 12); 4104 *eax = 0x40000001; 4105 *ebx = signature[0]; 4106 *ecx = signature[1]; 4107 *edx = signature[2]; 4108 } else { 4109 *eax = 0; 4110 *ebx = 0; 4111 *ecx = 0; 4112 *edx = 0; 4113 } 4114 break; 4115 case 0x40000001: 4116 *eax = 0; 4117 *ebx = 0; 4118 *ecx = 0; 4119 *edx = 0; 4120 break; 4121 case 0x80000000: 4122 *eax = env->cpuid_xlevel; 4123 *ebx = env->cpuid_vendor1; 4124 *edx = env->cpuid_vendor2; 4125 *ecx = env->cpuid_vendor3; 4126 break; 4127 case 0x80000001: 4128 *eax = env->cpuid_version; 4129 *ebx = 0; 4130 *ecx = env->features[FEAT_8000_0001_ECX]; 4131 *edx = env->features[FEAT_8000_0001_EDX]; 4132 4133 /* The Linux kernel checks for the CMPLegacy bit and 4134 * discards multiple thread information if it is set. 4135 * So don't set it here for Intel to make Linux guests happy. 4136 */ 4137 if (cs->nr_cores * cs->nr_threads > 1) { 4138 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 4139 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 4140 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 4141 *ecx |= 1 << 1; /* CmpLegacy bit */ 4142 } 4143 } 4144 break; 4145 case 0x80000002: 4146 case 0x80000003: 4147 case 0x80000004: 4148 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 4149 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 4150 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 4151 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 4152 break; 4153 case 0x80000005: 4154 /* cache info (L1 cache) */ 4155 if (cpu->cache_info_passthrough) { 4156 host_cpuid(index, 0, eax, ebx, ecx, edx); 4157 break; 4158 } 4159 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ 4160 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 4161 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ 4162 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 4163 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 4164 *edx = 
encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 4165 break; 4166 case 0x80000006: 4167 /* cache info (L2 cache) */ 4168 if (cpu->cache_info_passthrough) { 4169 host_cpuid(index, 0, eax, ebx, ecx, edx); 4170 break; 4171 } 4172 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ 4173 (L2_DTLB_2M_ENTRIES << 16) | \ 4174 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ 4175 (L2_ITLB_2M_ENTRIES); 4176 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ 4177 (L2_DTLB_4K_ENTRIES << 16) | \ 4178 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ 4179 (L2_ITLB_4K_ENTRIES); 4180 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 4181 cpu->enable_l3_cache ? 4182 env->cache_info_amd.l3_cache : NULL, 4183 ecx, edx); 4184 break; 4185 case 0x80000007: 4186 *eax = 0; 4187 *ebx = 0; 4188 *ecx = 0; 4189 *edx = env->features[FEAT_8000_0007_EDX]; 4190 break; 4191 case 0x80000008: 4192 /* virtual & phys address size in low 2 bytes. */ 4193 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 4194 /* 64 bit processor */ 4195 *eax = cpu->phys_bits; /* configurable physical bits */ 4196 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 4197 *eax |= 0x00003900; /* 57 bits virtual */ 4198 } else { 4199 *eax |= 0x00003000; /* 48 bits virtual */ 4200 } 4201 } else { 4202 *eax = cpu->phys_bits; 4203 } 4204 *ebx = env->features[FEAT_8000_0008_EBX]; 4205 *ecx = 0; 4206 *edx = 0; 4207 if (cs->nr_cores * cs->nr_threads > 1) { 4208 *ecx |= (cs->nr_cores * cs->nr_threads) - 1; 4209 } 4210 break; 4211 case 0x8000000A: 4212 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 4213 *eax = 0x00000001; /* SVM Revision */ 4214 *ebx = 0x00000010; /* nr of ASIDs */ 4215 *ecx = 0; 4216 *edx = env->features[FEAT_SVM]; /* optional features */ 4217 } else { 4218 *eax = 0; 4219 *ebx = 0; 4220 *ecx = 0; 4221 *edx = 0; 4222 } 4223 break; 4224 case 0x8000001D: 4225 *eax = 0; 4226 switch (count) { 4227 case 0: /* L1 dcache info */ 4228 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs, 
4229 eax, ebx, ecx, edx); 4230 break; 4231 case 1: /* L1 icache info */ 4232 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs, 4233 eax, ebx, ecx, edx); 4234 break; 4235 case 2: /* L2 cache info */ 4236 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs, 4237 eax, ebx, ecx, edx); 4238 break; 4239 case 3: /* L3 cache info */ 4240 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs, 4241 eax, ebx, ecx, edx); 4242 break; 4243 default: /* end of info */ 4244 *eax = *ebx = *ecx = *edx = 0; 4245 break; 4246 } 4247 break; 4248 case 0x8000001E: 4249 assert(cpu->core_id <= 255); 4250 encode_topo_cpuid8000001e(cs, cpu, 4251 eax, ebx, ecx, edx); 4252 break; 4253 case 0xC0000000: 4254 *eax = env->cpuid_xlevel2; 4255 *ebx = 0; 4256 *ecx = 0; 4257 *edx = 0; 4258 break; 4259 case 0xC0000001: 4260 /* Support for VIA CPU's CPUID instruction */ 4261 *eax = env->cpuid_version; 4262 *ebx = 0; 4263 *ecx = 0; 4264 *edx = env->features[FEAT_C000_0001_EDX]; 4265 break; 4266 case 0xC0000002: 4267 case 0xC0000003: 4268 case 0xC0000004: 4269 /* Reserved for the future, and now filled with zero */ 4270 *eax = 0; 4271 *ebx = 0; 4272 *ecx = 0; 4273 *edx = 0; 4274 break; 4275 case 0x8000001F: 4276 *eax = sev_enabled() ? 
                                             0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but not including) the fields that must
     * survive reset (end_reset_fields marks the boundary).
     */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 power-on value: ET set, CD/NW set (0x60000010) */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so CS:IP = reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds family/model/stepping after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init; fptags == 1 means "empty" */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted until the BSP brings them up */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    } else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* Returns true if this CPU is currently the bootstrap processor,
 * as reported by the BSP bit of its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Initialize machine-check (MCE/MCA) capability registers, if the CPU
 * model is family >= 6 and advertises both CPUID_MCE and CPUID_MCA.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* Enable all error reporting in each bank's MCi_CTL */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation class matching the accelerator in use. */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create the local APIC child object for this CPU (not yet realized). */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* drop our ref; the child property now owns the APIC object */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to
link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize this CPU's local APIC and, once per machine, map the APIC
 * MMIO window at the default APIC base address.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's SMM address space with high priority.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* No APIC device model in user-mode emulation. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to value if it is currently lower (monotonic max). */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;  /* basic / extended / Centaur range */

    /* Nothing to do if no feature bit of this word is enabled */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    /* component i is enabled iff the feature word guarding it is enabled */
    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply global "+feature" requests as QOM property writes */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply global "-feature" requests */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only exposed under KVM with expose-kvm on */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Mask every feature word down to what the accelerator supports,
     * remembering what was dropped in cpu->filtered_features[].
     */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* intel-pt needs the host's CPUID[0x14] capabilities to be a superset
     * of what we advertise in cpu_x86_cpuid(); otherwise drop it.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

/* DeviceClass::realize for X86CPU: expand and filter CPUID data, settle
 * phys-bits and cache info, create the APIC, set up address spaces, and
 * finally reset the CPU and chain to the parent realize.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Report (and with enforce_cpuid, reject) features the host filtered */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        error_report("This family of AMD CPU doesn't support "
                     "hyperthreading(%d). Please configure -smp "
                     "options properly or try enabling topoext feature.",
                     cs->nr_threads);
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* DeviceClass::unrealize: tear down the vCPU thread, reset hook and APIC. */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* Describes which bit(s) of which feature word a QOM property controls. */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

/* QOM getter: true only if ALL bits in the property's mask are set. */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: set/clear the masked bits; rejected after realize. */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
cpu->env.features[fp->w] |= fp->mask; 5082 } else { 5083 cpu->env.features[fp->w] &= ~fp->mask; 5084 } 5085 cpu->env.user_features[fp->w] |= fp->mask; 5086 } 5087 5088 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 5089 void *opaque) 5090 { 5091 BitProperty *prop = opaque; 5092 g_free(prop); 5093 } 5094 5095 /* Register a boolean property to get/set a single bit in a uint32_t field. 5096 * 5097 * The same property name can be registered multiple times to make it affect 5098 * multiple bits in the same FeatureWord. In that case, the getter will return 5099 * true only if all bits are set. 5100 */ 5101 static void x86_cpu_register_bit_prop(X86CPU *cpu, 5102 const char *prop_name, 5103 FeatureWord w, 5104 int bitnr) 5105 { 5106 BitProperty *fp; 5107 ObjectProperty *op; 5108 uint32_t mask = (1UL << bitnr); 5109 5110 op = object_property_find(OBJECT(cpu), prop_name, NULL); 5111 if (op) { 5112 fp = op->opaque; 5113 assert(fp->w == w); 5114 fp->mask |= mask; 5115 } else { 5116 fp = g_new0(BitProperty, 1); 5117 fp->w = w; 5118 fp->mask = mask; 5119 object_property_add(OBJECT(cpu), prop_name, "bool", 5120 x86_cpu_get_bit_prop, 5121 x86_cpu_set_bit_prop, 5122 x86_cpu_release_bit_prop, fp, &error_abort); 5123 } 5124 } 5125 5126 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 5127 FeatureWord w, 5128 int bitnr) 5129 { 5130 FeatureWordInfo *fi = &feature_word_info[w]; 5131 const char *name = fi->feat_names[bitnr]; 5132 5133 if (!name) { 5134 return; 5135 } 5136 5137 /* Property names should use "-" instead of "_". 
5138 * Old names containing underscores are registered as aliases 5139 * using object_property_add_alias() 5140 */ 5141 assert(!strchr(name, '_')); 5142 /* aliases don't use "|" delimiters anymore, they are registered 5143 * manually using object_property_add_alias() */ 5144 assert(!strchr(name, '|')); 5145 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 5146 } 5147 5148 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 5149 { 5150 X86CPU *cpu = X86_CPU(cs); 5151 CPUX86State *env = &cpu->env; 5152 GuestPanicInformation *panic_info = NULL; 5153 5154 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 5155 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 5156 5157 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 5158 5159 assert(HV_CRASH_PARAMS >= 5); 5160 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 5161 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 5162 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 5163 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 5164 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 5165 } 5166 5167 return panic_info; 5168 } 5169 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 5170 const char *name, void *opaque, 5171 Error **errp) 5172 { 5173 CPUState *cs = CPU(obj); 5174 GuestPanicInformation *panic_info; 5175 5176 if (!cs->crash_occurred) { 5177 error_setg(errp, "No crash occured"); 5178 return; 5179 } 5180 5181 panic_info = x86_cpu_get_crash_info(cs); 5182 if (panic_info == NULL) { 5183 error_setg(errp, "No crash information"); 5184 return; 5185 } 5186 5187 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 5188 errp); 5189 qapi_free_GuestPanicInformation(panic_info); 5190 } 5191 5192 static void x86_cpu_initfn(Object *obj) 5193 { 5194 CPUState *cs = CPU(obj); 5195 X86CPU *cpu = X86_CPU(obj); 5196 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5197 CPUX86State *env = &cpu->env; 5198 FeatureWord w; 5199 5200 
cs->env_ptr = env; 5201 5202 object_property_add(obj, "family", "int", 5203 x86_cpuid_version_get_family, 5204 x86_cpuid_version_set_family, NULL, NULL, NULL); 5205 object_property_add(obj, "model", "int", 5206 x86_cpuid_version_get_model, 5207 x86_cpuid_version_set_model, NULL, NULL, NULL); 5208 object_property_add(obj, "stepping", "int", 5209 x86_cpuid_version_get_stepping, 5210 x86_cpuid_version_set_stepping, NULL, NULL, NULL); 5211 object_property_add_str(obj, "vendor", 5212 x86_cpuid_get_vendor, 5213 x86_cpuid_set_vendor, NULL); 5214 object_property_add_str(obj, "model-id", 5215 x86_cpuid_get_model_id, 5216 x86_cpuid_set_model_id, NULL); 5217 object_property_add(obj, "tsc-frequency", "int", 5218 x86_cpuid_get_tsc_freq, 5219 x86_cpuid_set_tsc_freq, NULL, NULL, NULL); 5220 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 5221 x86_cpu_get_feature_words, 5222 NULL, NULL, (void *)env->features, NULL); 5223 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 5224 x86_cpu_get_feature_words, 5225 NULL, NULL, (void *)cpu->filtered_features, NULL); 5226 5227 object_property_add(obj, "crash-information", "GuestPanicInformation", 5228 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL); 5229 5230 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; 5231 5232 for (w = 0; w < FEATURE_WORDS; w++) { 5233 int bitnr; 5234 5235 for (bitnr = 0; bitnr < 32; bitnr++) { 5236 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 5237 } 5238 } 5239 5240 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort); 5241 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort); 5242 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort); 5243 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort); 5244 object_property_add_alias(obj, "xd", obj, "nx", &error_abort); 5245 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort); 5246 object_property_add_alias(obj, "i64", obj, "lm", 
&error_abort); 5247 5248 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort); 5249 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort); 5250 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort); 5251 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort); 5252 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort); 5253 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort); 5254 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort); 5255 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort); 5256 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort); 5257 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort); 5258 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort); 5259 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort); 5260 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); 5261 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); 5262 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); 5263 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); 5264 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); 5265 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort); 5266 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort); 5267 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort); 5268 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort); 5269 5270 if (xcc->cpu_def) { 5271 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); 5272 } 5273 } 5274 5275 static int64_t x86_cpu_get_arch_id(CPUState *cs) 5276 { 5277 X86CPU *cpu = X86_CPU(cs); 5278 5279 return cpu->apic_id; 
5280 } 5281 5282 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 5283 { 5284 X86CPU *cpu = X86_CPU(cs); 5285 5286 return cpu->env.cr[0] & CR0_PG_MASK; 5287 } 5288 5289 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 5290 { 5291 X86CPU *cpu = X86_CPU(cs); 5292 5293 cpu->env.eip = value; 5294 } 5295 5296 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 5297 { 5298 X86CPU *cpu = X86_CPU(cs); 5299 5300 cpu->env.eip = tb->pc - tb->cs_base; 5301 } 5302 5303 static bool x86_cpu_has_work(CPUState *cs) 5304 { 5305 X86CPU *cpu = X86_CPU(cs); 5306 CPUX86State *env = &cpu->env; 5307 5308 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD | 5309 CPU_INTERRUPT_POLL)) && 5310 (env->eflags & IF_MASK)) || 5311 (cs->interrupt_request & (CPU_INTERRUPT_NMI | 5312 CPU_INTERRUPT_INIT | 5313 CPU_INTERRUPT_SIPI | 5314 CPU_INTERRUPT_MCE)) || 5315 ((cs->interrupt_request & CPU_INTERRUPT_SMI) && 5316 !(env->hflags & HF_SMM_MASK)); 5317 } 5318 5319 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 5320 { 5321 X86CPU *cpu = X86_CPU(cs); 5322 CPUX86State *env = &cpu->env; 5323 5324 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 5325 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 5326 : bfd_mach_i386_i8086); 5327 info->print_insn = print_insn_i386; 5328 5329 info->cap_arch = CS_ARCH_X86; 5330 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 5331 : env->hflags & HF_CS32_MASK ? 
CS_MODE_32 5332 : CS_MODE_16); 5333 info->cap_insn_unit = 1; 5334 info->cap_insn_split = 8; 5335 } 5336 5337 void x86_update_hflags(CPUX86State *env) 5338 { 5339 uint32_t hflags; 5340 #define HFLAG_COPY_MASK \ 5341 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 5342 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 5343 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 5344 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 5345 5346 hflags = env->hflags & HFLAG_COPY_MASK; 5347 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 5348 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 5349 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 5350 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 5351 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 5352 5353 if (env->cr[4] & CR4_OSFXSR_MASK) { 5354 hflags |= HF_OSFXSR_MASK; 5355 } 5356 5357 if (env->efer & MSR_EFER_LMA) { 5358 hflags |= HF_LMA_MASK; 5359 } 5360 5361 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 5362 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 5363 } else { 5364 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 5365 (DESC_B_SHIFT - HF_CS32_SHIFT); 5366 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 5367 (DESC_B_SHIFT - HF_SS32_SHIFT); 5368 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 5369 !(hflags & HF_CS32_MASK)) { 5370 hflags |= HF_ADDSEG_MASK; 5371 } else { 5372 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 5373 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 5374 } 5375 } 5376 env->hflags = hflags; 5377 } 5378 5379 static Property x86_cpu_properties[] = { 5380 #ifdef CONFIG_USER_ONLY 5381 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 5382 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 5383 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 5384 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 5385 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 
5386 #else 5387 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 5388 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 5389 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 5390 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 5391 #endif 5392 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 5393 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 5394 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks }, 5395 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false), 5396 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false), 5397 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false), 5398 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false), 5399 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false), 5400 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false), 5401 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false), 5402 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false), 5403 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false), 5404 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false), 5405 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false), 5406 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 5407 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 5408 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 5409 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 5410 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 5411 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 5412 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 5413 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 5414 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 5415 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 5416 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 5417 
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 5418 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 5419 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 5420 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 5421 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 5422 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 5423 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 5424 false), 5425 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 5426 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 5427 /* 5428 * lecacy_cache defaults to true unless the CPU model provides its 5429 * own cache information (see x86_cpu_load_def()). 5430 */ 5431 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 5432 5433 /* 5434 * From "Requirements for Implementing the Microsoft 5435 * Hypervisor Interface": 5436 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 5437 * 5438 * "Starting with Windows Server 2012 and Windows 8, if 5439 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 5440 * the hypervisor imposes no specific limit to the number of VPs. 5441 * In this case, Windows Server 2012 guest VMs may use more than 5442 * 64 VPs, up to the maximum supported number of processors applicable 5443 * to the specific Windows version being used." 
5444 */ 5445 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 5446 DEFINE_PROP_END_OF_LIST() 5447 }; 5448 5449 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 5450 { 5451 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5452 CPUClass *cc = CPU_CLASS(oc); 5453 DeviceClass *dc = DEVICE_CLASS(oc); 5454 5455 device_class_set_parent_realize(dc, x86_cpu_realizefn, 5456 &xcc->parent_realize); 5457 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 5458 &xcc->parent_unrealize); 5459 dc->props = x86_cpu_properties; 5460 5461 xcc->parent_reset = cc->reset; 5462 cc->reset = x86_cpu_reset; 5463 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 5464 5465 cc->class_by_name = x86_cpu_class_by_name; 5466 cc->parse_features = x86_cpu_parse_featurestr; 5467 cc->has_work = x86_cpu_has_work; 5468 #ifdef CONFIG_TCG 5469 cc->do_interrupt = x86_cpu_do_interrupt; 5470 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 5471 #endif 5472 cc->dump_state = x86_cpu_dump_state; 5473 cc->get_crash_info = x86_cpu_get_crash_info; 5474 cc->set_pc = x86_cpu_set_pc; 5475 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 5476 cc->gdb_read_register = x86_cpu_gdb_read_register; 5477 cc->gdb_write_register = x86_cpu_gdb_write_register; 5478 cc->get_arch_id = x86_cpu_get_arch_id; 5479 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 5480 #ifdef CONFIG_USER_ONLY 5481 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault; 5482 #else 5483 cc->asidx_from_attrs = x86_asidx_from_attrs; 5484 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 5485 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; 5486 cc->write_elf64_note = x86_cpu_write_elf64_note; 5487 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 5488 cc->write_elf32_note = x86_cpu_write_elf32_note; 5489 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 5490 cc->vmsd = &vmstate_x86_cpu; 5491 #endif 5492 cc->gdb_arch_name = x86_gdb_arch_name; 5493 #ifdef TARGET_X86_64 5494 cc->gdb_core_xml_file = 
"i386-64bit.xml"; 5495 cc->gdb_num_core_regs = 57; 5496 #else 5497 cc->gdb_core_xml_file = "i386-32bit.xml"; 5498 cc->gdb_num_core_regs = 41; 5499 #endif 5500 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 5501 cc->debug_excp_handler = breakpoint_handler; 5502 #endif 5503 cc->cpu_exec_enter = x86_cpu_exec_enter; 5504 cc->cpu_exec_exit = x86_cpu_exec_exit; 5505 #ifdef CONFIG_TCG 5506 cc->tcg_initialize = tcg_x86_init; 5507 #endif 5508 cc->disas_set_info = x86_disas_set_info; 5509 5510 dc->user_creatable = true; 5511 } 5512 5513 static const TypeInfo x86_cpu_type_info = { 5514 .name = TYPE_X86_CPU, 5515 .parent = TYPE_CPU, 5516 .instance_size = sizeof(X86CPU), 5517 .instance_init = x86_cpu_initfn, 5518 .abstract = true, 5519 .class_size = sizeof(X86CPUClass), 5520 .class_init = x86_cpu_common_class_init, 5521 }; 5522 5523 5524 /* "base" CPU model, used by query-cpu-model-expansion */ 5525 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 5526 { 5527 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5528 5529 xcc->static_model = true; 5530 xcc->migration_safe = true; 5531 xcc->model_description = "base CPU model type with no features enabled"; 5532 xcc->ordering = 8; 5533 } 5534 5535 static const TypeInfo x86_base_cpu_type_info = { 5536 .name = X86_CPU_TYPE_NAME("base"), 5537 .parent = TYPE_X86_CPU, 5538 .class_init = x86_cpu_base_class_init, 5539 }; 5540 5541 static void x86_cpu_register_types(void) 5542 { 5543 int i; 5544 5545 type_register_static(&x86_cpu_type_info); 5546 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 5547 x86_register_cpudef_type(&builtin_x86_defs[i]); 5548 } 5549 type_register_static(&max_x86_cpu_type_info); 5550 type_register_static(&x86_base_cpu_type_info); 5551 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 5552 type_register_static(&host_x86_cpu_type_info); 5553 #endif 5554 } 5555 5556 type_init(x86_cpu_register_types) 5557