1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/reset.h" 30 #include "sysemu/hvf.h" 31 #include "sysemu/cpus.h" 32 #include "kvm_i386.h" 33 #include "sev_i386.h" 34 35 #include "qemu/error-report.h" 36 #include "qemu/module.h" 37 #include "qemu/option.h" 38 #include "qemu/config-file.h" 39 #include "qapi/error.h" 40 #include "qapi/qapi-visit-machine.h" 41 #include "qapi/qapi-visit-run-state.h" 42 #include "qapi/qmp/qdict.h" 43 #include "qapi/qmp/qerror.h" 44 #include "qapi/visitor.h" 45 #include "qom/qom-qobject.h" 46 #include "sysemu/arch_init.h" 47 #include "qapi/qapi-commands-machine-target.h" 48 49 #include "standard-headers/asm-x86/kvm_para.h" 50 51 #include "sysemu/sysemu.h" 52 #include "sysemu/tcg.h" 53 #include "hw/qdev-properties.h" 54 #include "hw/i386/topology.h" 55 #ifndef CONFIG_USER_ONLY 56 #include "exec/address-spaces.h" 57 #include "hw/xen/xen.h" 58 #include "hw/i386/apic_internal.h" 59 #include "hw/boards.h" 60 #endif 61 62 #include "disas/capstone.h" 63 64 /* Helpers for building CPUID[2] 
descriptors: */ 65 66 struct CPUID2CacheDescriptorInfo { 67 enum CacheType type; 68 int level; 69 int size; 70 int line_size; 71 int associativity; 72 }; 73 74 /* 75 * Known CPUID 2 cache descriptors. 76 * From Intel SDM Volume 2A, CPUID instruction 77 */ 78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 82 .associativity = 4, .line_size = 32, }, 83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 84 .associativity = 4, .line_size = 64, }, 85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 86 .associativity = 2, .line_size = 32, }, 87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 32, }, 89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 90 .associativity = 4, .line_size = 64, }, 91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 92 .associativity = 6, .line_size = 64, }, 93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 94 .associativity = 2, .line_size = 64, }, 95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 96 .associativity = 8, .line_size = 64, }, 97 /* lines per sector is not supported cpuid2_cache_descriptor(), 98 * so descriptors 0x22, 0x23 are not included 99 */ 100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 101 .associativity = 16, .line_size = 64, }, 102 /* lines per sector is not supported cpuid2_cache_descriptor(), 103 * so descriptors 0x25, 0x20 are not included 104 */ 105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 108 .associativity = 8, .line_size = 64, }, 109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 110 .associativity = 4, .line_size 
= 32, }, 111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 112 .associativity = 4, .line_size = 32, }, 113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 118 .associativity = 4, .line_size = 32, }, 119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 120 .associativity = 4, .line_size = 64, }, 121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 122 .associativity = 8, .line_size = 64, }, 123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 129 .associativity = 16, .line_size = 64, }, 130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 131 .associativity = 12, .line_size = 64, }, 132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 133 .associativity = 16, .line_size = 64, }, 134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 135 .associativity = 24, .line_size = 64, }, 136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 137 .associativity = 8, .line_size = 64, }, 138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 141 .associativity = 4, .line_size = 64, }, 142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 143 .associativity = 4, .line_size = 64, }, 144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 145 .associativity = 4, .line_size = 64, }, 146 /* lines per sector 
is not supported cpuid2_cache_descriptor(), 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 148 */ 149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 150 .associativity = 8, .line_size = 64, }, 151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 2, .line_size = 64, }, 153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 154 .associativity = 8, .line_size = 64, }, 155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 162 .associativity = 8, .line_size = 32, }, 163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 164 .associativity = 4, .line_size = 64, }, 165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 166 .associativity = 8, .line_size = 64, }, 167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 172 .associativity = 4, .line_size = 64, }, 173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 178 .associativity = 8, .line_size = 64, }, 179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 182 .associativity = 12, 
.line_size = 64, }, 183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 184 .associativity = 12, .line_size = 64, }, 185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 190 .associativity = 16, .line_size = 64, }, 191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 196 .associativity = 24, .line_size = 64, }, 197 }; 198 199 /* 200 * "CPUID leaf 2 does not report cache descriptor information, 201 * use CPUID leaf 4 to query cache parameters" 202 */ 203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 204 205 /* 206 * Return a CPUID 2 cache descriptor for a given cache. 
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 208 */ 209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 210 { 211 int i; 212 213 assert(cache->size > 0); 214 assert(cache->level > 0); 215 assert(cache->line_size > 0); 216 assert(cache->associativity > 0); 217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 219 if (d->level == cache->level && d->type == cache->type && 220 d->size == cache->size && d->line_size == cache->line_size && 221 d->associativity == cache->associativity) { 222 return i; 223 } 224 } 225 226 return CACHE_DESCRIPTOR_UNAVAILABLE; 227 } 228 229 /* CPUID Leaf 4 constants: */ 230 231 /* EAX: */ 232 #define CACHE_TYPE_D 1 233 #define CACHE_TYPE_I 2 234 #define CACHE_TYPE_UNIFIED 3 235 236 #define CACHE_LEVEL(l) (l << 5) 237 238 #define CACHE_SELF_INIT_LEVEL (1 << 8) 239 240 /* EDX: */ 241 #define CACHE_NO_INVD_SHARING (1 << 0) 242 #define CACHE_INCLUSIVE (1 << 1) 243 #define CACHE_COMPLEX_IDX (1 << 2) 244 245 /* Encode CacheType for CPUID[4].EAX */ 246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 249 0 /* Invalid value */) 250 251 252 /* Encode cache info for CPUID[4] */ 253 static void encode_cache_cpuid4(CPUCacheInfo *cache, 254 int num_apic_ids, int num_cores, 255 uint32_t *eax, uint32_t *ebx, 256 uint32_t *ecx, uint32_t *edx) 257 { 258 assert(cache->size == cache->line_size * cache->associativity * 259 cache->partitions * cache->sets); 260 261 assert(num_apic_ids > 0); 262 *eax = CACHE_TYPE(cache->type) | 263 CACHE_LEVEL(cache->level) | 264 (cache->self_init ? 
CACHE_SELF_INIT_LEVEL : 0) | 265 ((num_cores - 1) << 26) | 266 ((num_apic_ids - 1) << 14); 267 268 assert(cache->line_size > 0); 269 assert(cache->partitions > 0); 270 assert(cache->associativity > 0); 271 /* We don't implement fully-associative caches */ 272 assert(cache->associativity < cache->sets); 273 *ebx = (cache->line_size - 1) | 274 ((cache->partitions - 1) << 12) | 275 ((cache->associativity - 1) << 22); 276 277 assert(cache->sets > 0); 278 *ecx = cache->sets - 1; 279 280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 281 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 283 } 284 285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 287 { 288 assert(cache->size % 1024 == 0); 289 assert(cache->lines_per_tag > 0); 290 assert(cache->associativity > 0); 291 assert(cache->line_size > 0); 292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 293 (cache->lines_per_tag << 8) | (cache->line_size); 294 } 295 296 #define ASSOC_FULL 0xFF 297 298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 300 a == 2 ? 0x2 : \ 301 a == 4 ? 0x4 : \ 302 a == 8 ? 0x6 : \ 303 a == 16 ? 0x8 : \ 304 a == 32 ? 0xA : \ 305 a == 48 ? 0xB : \ 306 a == 64 ? 0xC : \ 307 a == 96 ? 0xD : \ 308 a == 128 ? 0xE : \ 309 a == ASSOC_FULL ? 0xF : \ 310 0 /* invalid value */) 311 312 /* 313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 314 * @l3 can be NULL. 
315 */ 316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 317 CPUCacheInfo *l3, 318 uint32_t *ecx, uint32_t *edx) 319 { 320 assert(l2->size % 1024 == 0); 321 assert(l2->associativity > 0); 322 assert(l2->lines_per_tag > 0); 323 assert(l2->line_size > 0); 324 *ecx = ((l2->size / 1024) << 16) | 325 (AMD_ENC_ASSOC(l2->associativity) << 12) | 326 (l2->lines_per_tag << 8) | (l2->line_size); 327 328 if (l3) { 329 assert(l3->size % (512 * 1024) == 0); 330 assert(l3->associativity > 0); 331 assert(l3->lines_per_tag > 0); 332 assert(l3->line_size > 0); 333 *edx = ((l3->size / (512 * 1024)) << 18) | 334 (AMD_ENC_ASSOC(l3->associativity) << 12) | 335 (l3->lines_per_tag << 8) | (l3->line_size); 336 } else { 337 *edx = 0; 338 } 339 } 340 341 /* 342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 344 * Define the constants to build the cpu topology. Right now, TOPOEXT 345 * feature is enabled only on EPYC. So, these constants are based on 346 * EPYC supported configurations. We may need to handle the cases if 347 * these values change in future. 348 */ 349 /* Maximum core complexes in a node */ 350 #define MAX_CCX 2 351 /* Maximum cores in a core complex */ 352 #define MAX_CORES_IN_CCX 4 353 /* Maximum cores in a node */ 354 #define MAX_CORES_IN_NODE 8 355 /* Maximum nodes in a socket */ 356 #define MAX_NODES_PER_SOCKET 4 357 358 /* 359 * Figure out the number of nodes required to build this config. 360 * Max cores in a node is 8 361 */ 362 static int nodes_in_socket(int nr_cores) 363 { 364 int nodes; 365 366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); 367 368 /* Hardware does not support config with 3 nodes, return 4 in that case */ 369 return (nodes == 3) ? 
4 : nodes; 370 } 371 372 /* 373 * Decide the number of cores in a core complex with the given nr_cores using 374 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and 375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible 376 * L3 cache is shared across all cores in a core complex. So, this will also 377 * tell us how many cores are sharing the L3 cache. 378 */ 379 static int cores_in_core_complex(int nr_cores) 380 { 381 int nodes; 382 383 /* Check if we can fit all the cores in one core complex */ 384 if (nr_cores <= MAX_CORES_IN_CCX) { 385 return nr_cores; 386 } 387 /* Get the number of nodes required to build this config */ 388 nodes = nodes_in_socket(nr_cores); 389 390 /* 391 * Divide the cores accros all the core complexes 392 * Return rounded up value 393 */ 394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); 395 } 396 397 /* Encode cache info for CPUID[8000001D] */ 398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs, 399 uint32_t *eax, uint32_t *ebx, 400 uint32_t *ecx, uint32_t *edx) 401 { 402 uint32_t l3_cores; 403 assert(cache->size == cache->line_size * cache->associativity * 404 cache->partitions * cache->sets); 405 406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 408 409 /* L3 is shared among multiple cores */ 410 if (cache->level == 3) { 411 l3_cores = cores_in_core_complex(cs->nr_cores); 412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14; 413 } else { 414 *eax |= ((cs->nr_threads - 1) << 14); 415 } 416 417 assert(cache->line_size > 0); 418 assert(cache->partitions > 0); 419 assert(cache->associativity > 0); 420 /* We don't implement fully-associative caches */ 421 assert(cache->associativity < cache->sets); 422 *ebx = (cache->line_size - 1) | 423 ((cache->partitions - 1) << 12) | 424 ((cache->associativity - 1) << 22); 425 426 assert(cache->sets > 0); 427 *ecx = cache->sets - 1; 428 429 *edx = (cache->no_invd_sharing ? 
CACHE_NO_INVD_SHARING : 0) | 430 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 432 } 433 434 /* Data structure to hold the configuration info for a given core index */ 435 struct core_topology { 436 /* core complex id of the current core index */ 437 int ccx_id; 438 /* 439 * Adjusted core index for this core in the topology 440 * This can be 0,1,2,3 with max 4 cores in a core complex 441 */ 442 int core_id; 443 /* Node id for this core index */ 444 int node_id; 445 /* Number of nodes in this config */ 446 int num_nodes; 447 }; 448 449 /* 450 * Build the configuration closely match the EPYC hardware. Using the EPYC 451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE) 452 * right now. This could change in future. 453 * nr_cores : Total number of cores in the config 454 * core_id : Core index of the current CPU 455 * topo : Data structure to hold all the config info for this core index 456 */ 457 static void build_core_topology(int nr_cores, int core_id, 458 struct core_topology *topo) 459 { 460 int nodes, cores_in_ccx; 461 462 /* First get the number of nodes required */ 463 nodes = nodes_in_socket(nr_cores); 464 465 cores_in_ccx = cores_in_core_complex(nr_cores); 466 467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX); 468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx; 469 topo->core_id = core_id % cores_in_ccx; 470 topo->num_nodes = nodes; 471 } 472 473 /* Encode cache info for CPUID[8000001E] */ 474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu, 475 uint32_t *eax, uint32_t *ebx, 476 uint32_t *ecx, uint32_t *edx) 477 { 478 struct core_topology topo = {0}; 479 unsigned long nodes; 480 int shift; 481 482 build_core_topology(cs->nr_cores, cpu->core_id, &topo); 483 *eax = cpu->apic_id; 484 /* 485 * CPUID_Fn8000001E_EBX 486 * 31:16 Reserved 487 * 15:8 Threads per core (The number of threads per core is 488 * Threads per core + 1) 489 * 7:0 Core id 
(see bit decoding below) 490 * SMT: 491 * 4:3 node id 492 * 2 Core complex id 493 * 1:0 Core id 494 * Non SMT: 495 * 5:4 node id 496 * 3 Core complex id 497 * 1:0 Core id 498 */ 499 if (cs->nr_threads - 1) { 500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) | 501 (topo.ccx_id << 2) | topo.core_id; 502 } else { 503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id; 504 } 505 /* 506 * CPUID_Fn8000001E_ECX 507 * 31:11 Reserved 508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 509 * 7:0 Node id (see bit decoding below) 510 * 2 Socket id 511 * 1:0 Node id 512 */ 513 if (topo.num_nodes <= 4) { 514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | 515 topo.node_id; 516 } else { 517 /* 518 * Node id fix up. Actual hardware supports up to 4 nodes. But with 519 * more than 32 cores, we may end up with more than 4 nodes. 520 * Node id is a combination of socket id and node id. Only requirement 521 * here is that this number should be unique accross the system. 522 * Shift the socket id to accommodate more nodes. We dont expect both 523 * socket id and node id to be big number at the same time. This is not 524 * an ideal config but we need to to support it. Max nodes we can have 525 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 526 * 5 bits for nodes. Find the left most set bit to represent the total 527 * number of nodes. find_last_bit returns last set bit(0 based). Left 528 * shift(+1) the socket id to represent all the nodes. 529 */ 530 nodes = topo.num_nodes - 1; 531 shift = find_last_bit(&nodes, 8); 532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) | 533 topo.node_id; 534 } 535 *edx = 0; 536 } 537 538 /* 539 * Definitions of the hardcoded cache entries we expose: 540 * These are legacy cache values. 
If there is a need to change any 541 * of these values please use builtin_x86_defs 542 */ 543 544 /* L1 data cache: */ 545 static CPUCacheInfo legacy_l1d_cache = { 546 .type = DATA_CACHE, 547 .level = 1, 548 .size = 32 * KiB, 549 .self_init = 1, 550 .line_size = 64, 551 .associativity = 8, 552 .sets = 64, 553 .partitions = 1, 554 .no_invd_sharing = true, 555 }; 556 557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 558 static CPUCacheInfo legacy_l1d_cache_amd = { 559 .type = DATA_CACHE, 560 .level = 1, 561 .size = 64 * KiB, 562 .self_init = 1, 563 .line_size = 64, 564 .associativity = 2, 565 .sets = 512, 566 .partitions = 1, 567 .lines_per_tag = 1, 568 .no_invd_sharing = true, 569 }; 570 571 /* L1 instruction cache: */ 572 static CPUCacheInfo legacy_l1i_cache = { 573 .type = INSTRUCTION_CACHE, 574 .level = 1, 575 .size = 32 * KiB, 576 .self_init = 1, 577 .line_size = 64, 578 .associativity = 8, 579 .sets = 64, 580 .partitions = 1, 581 .no_invd_sharing = true, 582 }; 583 584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 585 static CPUCacheInfo legacy_l1i_cache_amd = { 586 .type = INSTRUCTION_CACHE, 587 .level = 1, 588 .size = 64 * KiB, 589 .self_init = 1, 590 .line_size = 64, 591 .associativity = 2, 592 .sets = 512, 593 .partitions = 1, 594 .lines_per_tag = 1, 595 .no_invd_sharing = true, 596 }; 597 598 /* Level 2 unified cache: */ 599 static CPUCacheInfo legacy_l2_cache = { 600 .type = UNIFIED_CACHE, 601 .level = 2, 602 .size = 4 * MiB, 603 .self_init = 1, 604 .line_size = 64, 605 .associativity = 16, 606 .sets = 4096, 607 .partitions = 1, 608 .no_invd_sharing = true, 609 }; 610 611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 612 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 613 .type = UNIFIED_CACHE, 614 .level = 2, 615 .size = 2 * MiB, 616 .line_size = 64, 617 .associativity = 8, 618 }; 619 620 621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 622 static CPUCacheInfo 
legacy_l2_cache_amd = { 623 .type = UNIFIED_CACHE, 624 .level = 2, 625 .size = 512 * KiB, 626 .line_size = 64, 627 .lines_per_tag = 1, 628 .associativity = 16, 629 .sets = 512, 630 .partitions = 1, 631 }; 632 633 /* Level 3 unified cache: */ 634 static CPUCacheInfo legacy_l3_cache = { 635 .type = UNIFIED_CACHE, 636 .level = 3, 637 .size = 16 * MiB, 638 .line_size = 64, 639 .associativity = 16, 640 .sets = 16384, 641 .partitions = 1, 642 .lines_per_tag = 1, 643 .self_init = true, 644 .inclusive = true, 645 .complex_indexing = true, 646 }; 647 648 /* TLB definitions: */ 649 650 #define L1_DTLB_2M_ASSOC 1 651 #define L1_DTLB_2M_ENTRIES 255 652 #define L1_DTLB_4K_ASSOC 1 653 #define L1_DTLB_4K_ENTRIES 255 654 655 #define L1_ITLB_2M_ASSOC 1 656 #define L1_ITLB_2M_ENTRIES 255 657 #define L1_ITLB_4K_ASSOC 1 658 #define L1_ITLB_4K_ENTRIES 255 659 660 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 662 #define L2_DTLB_4K_ASSOC 4 663 #define L2_DTLB_4K_ENTRIES 512 664 665 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 667 #define L2_ITLB_4K_ASSOC 4 668 #define L2_ITLB_4K_ENTRIES 512 669 670 /* CPUID Leaf 0x14 constants: */ 671 #define INTEL_PT_MAX_SUBLEAF 0x1 672 /* 673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 674 * MSR can be accessed; 675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 677 * of Intel PT MSRs across warm reset; 678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 679 */ 680 #define INTEL_PT_MINIMAL_EBX 0xf 681 /* 682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 684 * accessed; 685 * bit[01]: ToPA tables can hold any number of output entries, up to the 686 * maximum allowed by the MaskOrTableOffset field of 687 * 
IA32_RTIT_OUTPUT_MASK_PTRS; 688 * bit[02]: Support Single-Range Output scheme; 689 */ 690 #define INTEL_PT_MINIMAL_ECX 0x7 691 /* generated packets which contain IP payloads have LIP values */ 692 #define INTEL_PT_IP_LIP (1 << 31) 693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 698 699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 700 uint32_t vendor2, uint32_t vendor3) 701 { 702 int i; 703 for (i = 0; i < 4; i++) { 704 dst[i] = vendor1 >> (8 * i); 705 dst[i + 4] = vendor2 >> (8 * i); 706 dst[i + 8] = vendor3 >> (8 * i); 707 } 708 dst[CPUID_VENDOR_SZ] = '\0'; 709 } 710 711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 716 CPUID_PSE36 | CPUID_FXSR) 717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 721 CPUID_PAE | CPUID_SEP | CPUID_APIC) 722 723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 728 /* partly implemented: 729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH 
(needed for Win64) */ 730 /* missing: 731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 737 CPUID_EXT_RDRAND) 738 /* missing: 739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 743 CPUID_EXT_F16C */ 744 745 #ifdef TARGET_X86_64 746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 747 #else 748 #define TCG_EXT2_X86_64_FEATURES 0 749 #endif 750 751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 754 TCG_EXT2_X86_64_FEATURES) 755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 757 #define TCG_EXT4_FEATURES 0 758 #define TCG_SVM_FEATURES CPUID_SVM_NPT 759 #define TCG_KVM_FEATURES 0 760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 764 CPUID_7_0_EBX_ERMS) 765 /* missing: 766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 768 CPUID_7_0_EBX_RDSEED */ 769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 771 CPUID_7_0_ECX_LA57) 772 #define TCG_7_0_EDX_FEATURES 0 773 #define 
TCG_APM_FEATURES 0 774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 776 /* missing: 777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 778 779 typedef enum FeatureWordType { 780 CPUID_FEATURE_WORD, 781 MSR_FEATURE_WORD, 782 } FeatureWordType; 783 784 typedef struct FeatureWordInfo { 785 FeatureWordType type; 786 /* feature flags names are taken from "Intel Processor Identification and 787 * the CPUID Instruction" and AMD's "CPUID Specification". 788 * In cases of disagreement between feature naming conventions, 789 * aliases may be added. 790 */ 791 const char *feat_names[32]; 792 union { 793 /* If type==CPUID_FEATURE_WORD */ 794 struct { 795 uint32_t eax; /* Input EAX for CPUID */ 796 bool needs_ecx; /* CPUID instruction uses ECX as input */ 797 uint32_t ecx; /* Input ECX value for CPUID */ 798 int reg; /* output register (R_* constant) */ 799 } cpuid; 800 /* If type==MSR_FEATURE_WORD */ 801 struct { 802 uint32_t index; 803 struct { /*CPUID that enumerate this MSR*/ 804 FeatureWord cpuid_class; 805 uint32_t cpuid_flag; 806 } cpuid_dep; 807 } msr; 808 }; 809 uint32_t tcg_features; /* Feature flags supported by TCG */ 810 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ 811 uint32_t migratable_flags; /* Feature flags known to be migratable */ 812 /* Features that shouldn't be auto-enabled by "-cpu host" */ 813 uint32_t no_autoenable_flags; 814 } FeatureWordInfo; 815 816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 817 [FEAT_1_EDX] = { 818 .type = CPUID_FEATURE_WORD, 819 .feat_names = { 820 "fpu", "vme", "de", "pse", 821 "tsc", "msr", "pae", "mce", 822 "cx8", "apic", NULL, "sep", 823 "mtrr", "pge", "mca", "cmov", 824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 825 NULL, "ds" /* Intel dts */, "acpi", "mmx", 826 "fxsr", "sse", "sse2", "ss", 827 "ht" /* Intel htt */, "tm", "ia64", "pbe", 828 }, 829 .cpuid = {.eax = 1, .reg = R_EDX, }, 830 
.tcg_features = TCG_FEATURES, 831 }, 832 [FEAT_1_ECX] = { 833 .type = CPUID_FEATURE_WORD, 834 .feat_names = { 835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 836 "ds-cpl", "vmx", "smx", "est", 837 "tm2", "ssse3", "cid", NULL, 838 "fma", "cx16", "xtpr", "pdcm", 839 NULL, "pcid", "dca", "sse4.1", 840 "sse4.2", "x2apic", "movbe", "popcnt", 841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 842 "avx", "f16c", "rdrand", "hypervisor", 843 }, 844 .cpuid = { .eax = 1, .reg = R_ECX, }, 845 .tcg_features = TCG_EXT_FEATURES, 846 }, 847 /* Feature names that are already defined on feature_name[] but 848 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 849 * names on feat_names below. They are copied automatically 850 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 851 */ 852 [FEAT_8000_0001_EDX] = { 853 .type = CPUID_FEATURE_WORD, 854 .feat_names = { 855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 860 "nx", NULL, "mmxext", NULL /* mmx */, 861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 862 NULL, "lm", "3dnowext", "3dnow", 863 }, 864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 865 .tcg_features = TCG_EXT2_FEATURES, 866 }, 867 [FEAT_8000_0001_ECX] = { 868 .type = CPUID_FEATURE_WORD, 869 .feat_names = { 870 "lahf-lm", "cmp-legacy", "svm", "extapic", 871 "cr8legacy", "abm", "sse4a", "misalignsse", 872 "3dnowprefetch", "osvw", "ibs", "xop", 873 "skinit", "wdt", NULL, "lwp", 874 "fma4", "tce", NULL, "nodeid-msr", 875 NULL, "tbm", "topoext", "perfctr-core", 876 "perfctr-nb", NULL, NULL, NULL, 877 NULL, NULL, NULL, NULL, 878 }, 879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 880 .tcg_features = TCG_EXT3_FEATURES, 881 /* 882 * TOPOEXT is always 
allowed but can't be enabled blindly by 883 * "-cpu host", as it requires consistent cache topology info 884 * to be provided so it doesn't confuse guests. 885 */ 886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 887 }, 888 [FEAT_C000_0001_EDX] = { 889 .type = CPUID_FEATURE_WORD, 890 .feat_names = { 891 NULL, NULL, "xstore", "xstore-en", 892 NULL, NULL, "xcrypt", "xcrypt-en", 893 "ace2", "ace2-en", "phe", "phe-en", 894 "pmm", "pmm-en", NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 NULL, NULL, NULL, NULL, 898 NULL, NULL, NULL, NULL, 899 }, 900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 901 .tcg_features = TCG_EXT4_FEATURES, 902 }, 903 [FEAT_KVM] = { 904 .type = CPUID_FEATURE_WORD, 905 .feat_names = { 906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 909 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL, 910 NULL, NULL, NULL, NULL, 911 NULL, NULL, NULL, NULL, 912 "kvmclock-stable-bit", NULL, NULL, NULL, 913 NULL, NULL, NULL, NULL, 914 }, 915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 916 .tcg_features = TCG_KVM_FEATURES, 917 }, 918 [FEAT_KVM_HINTS] = { 919 .type = CPUID_FEATURE_WORD, 920 .feat_names = { 921 "kvm-hint-dedicated", NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 NULL, NULL, NULL, NULL, 924 NULL, NULL, NULL, NULL, 925 NULL, NULL, NULL, NULL, 926 NULL, NULL, NULL, NULL, 927 NULL, NULL, NULL, NULL, 928 NULL, NULL, NULL, NULL, 929 }, 930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 931 .tcg_features = TCG_KVM_FEATURES, 932 /* 933 * KVM hints aren't auto-enabled by -cpu host, they need to be 934 * explicitly enabled in the command-line. 935 */ 936 .no_autoenable_flags = ~0U, 937 }, 938 /* 939 * .feat_names are commented out for Hyper-V enlightenments because we 940 * don't want to have two different ways for enabling them on QEMU command 941 * line. Some features (e.g. 
"hyperv_time", "hyperv_vapic", ...) require 942 * enabling several feature bits simultaneously, exposing these bits 943 * individually may just confuse guests. 944 */ 945 [FEAT_HYPERV_EAX] = { 946 .type = CPUID_FEATURE_WORD, 947 .feat_names = { 948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 955 NULL, NULL, 956 NULL, NULL, NULL, NULL, 957 NULL, NULL, NULL, NULL, 958 NULL, NULL, NULL, NULL, 959 NULL, NULL, NULL, NULL, 960 }, 961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 962 }, 963 [FEAT_HYPERV_EBX] = { 964 .type = CPUID_FEATURE_WORD, 965 .feat_names = { 966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 968 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 969 NULL /* hv_create_port */, NULL /* hv_connect_port */, 970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 972 NULL, NULL, 973 NULL, NULL, NULL, NULL, 974 NULL, NULL, NULL, NULL, 975 NULL, NULL, NULL, NULL, 976 NULL, NULL, NULL, NULL, 977 }, 978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 979 }, 980 [FEAT_HYPERV_EDX] = { 981 .type = CPUID_FEATURE_WORD, 982 .feat_names = { 983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 986 NULL, NULL, 987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 988 NULL, NULL, NULL, NULL, 989 NULL, 
NULL, NULL, NULL, 990 NULL, NULL, NULL, NULL, 991 NULL, NULL, NULL, NULL, 992 NULL, NULL, NULL, NULL, 993 }, 994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 995 }, 996 [FEAT_HV_RECOMM_EAX] = { 997 .type = CPUID_FEATURE_WORD, 998 .feat_names = { 999 NULL /* hv_recommend_pv_as_switch */, 1000 NULL /* hv_recommend_pv_tlbflush_local */, 1001 NULL /* hv_recommend_pv_tlbflush_remote */, 1002 NULL /* hv_recommend_msr_apic_access */, 1003 NULL /* hv_recommend_msr_reset */, 1004 NULL /* hv_recommend_relaxed_timing */, 1005 NULL /* hv_recommend_dma_remapping */, 1006 NULL /* hv_recommend_int_remapping */, 1007 NULL /* hv_recommend_x2apic_msrs */, 1008 NULL /* hv_recommend_autoeoi_deprecation */, 1009 NULL /* hv_recommend_pv_ipi */, 1010 NULL /* hv_recommend_ex_hypercalls */, 1011 NULL /* hv_hypervisor_is_nested */, 1012 NULL /* hv_recommend_int_mbec */, 1013 NULL /* hv_recommend_evmcs */, 1014 NULL, 1015 NULL, NULL, NULL, NULL, 1016 NULL, NULL, NULL, NULL, 1017 NULL, NULL, NULL, NULL, 1018 NULL, NULL, NULL, NULL, 1019 }, 1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 1021 }, 1022 [FEAT_HV_NESTED_EAX] = { 1023 .type = CPUID_FEATURE_WORD, 1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 1025 }, 1026 [FEAT_SVM] = { 1027 .type = CPUID_FEATURE_WORD, 1028 .feat_names = { 1029 "npt", "lbrv", "svm-lock", "nrip-save", 1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 1031 NULL, NULL, "pause-filter", NULL, 1032 "pfthreshold", NULL, NULL, NULL, 1033 NULL, NULL, NULL, NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, NULL, NULL, NULL, 1036 NULL, NULL, NULL, NULL, 1037 }, 1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 1039 .tcg_features = TCG_SVM_FEATURES, 1040 }, 1041 [FEAT_7_0_EBX] = { 1042 .type = CPUID_FEATURE_WORD, 1043 .feat_names = { 1044 "fsgsbase", "tsc-adjust", NULL, "bmi1", 1045 "hle", "avx2", NULL, "smep", 1046 "bmi2", "erms", "invpcid", "rtm", 1047 NULL, NULL, "mpx", NULL, 1048 "avx512f", "avx512dq", "rdseed", "adx", 1049 "smap", "avx512ifma", 
"pcommit", "clflushopt", 1050 "clwb", "intel-pt", "avx512pf", "avx512er", 1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 1052 }, 1053 .cpuid = { 1054 .eax = 7, 1055 .needs_ecx = true, .ecx = 0, 1056 .reg = R_EBX, 1057 }, 1058 .tcg_features = TCG_7_0_EBX_FEATURES, 1059 }, 1060 [FEAT_7_0_ECX] = { 1061 .type = CPUID_FEATURE_WORD, 1062 .feat_names = { 1063 NULL, "avx512vbmi", "umip", "pku", 1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL, 1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 1067 "la57", NULL, NULL, NULL, 1068 NULL, NULL, "rdpid", NULL, 1069 NULL, "cldemote", NULL, "movdiri", 1070 "movdir64b", NULL, NULL, NULL, 1071 }, 1072 .cpuid = { 1073 .eax = 7, 1074 .needs_ecx = true, .ecx = 0, 1075 .reg = R_ECX, 1076 }, 1077 .tcg_features = TCG_7_0_ECX_FEATURES, 1078 }, 1079 [FEAT_7_0_EDX] = { 1080 .type = CPUID_FEATURE_WORD, 1081 .feat_names = { 1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 1083 NULL, NULL, NULL, NULL, 1084 NULL, NULL, "md-clear", NULL, 1085 NULL, NULL, NULL, NULL, 1086 NULL, NULL, NULL /* pconfig */, NULL, 1087 NULL, NULL, NULL, NULL, 1088 NULL, NULL, "spec-ctrl", "stibp", 1089 NULL, "arch-capabilities", "core-capability", "ssbd", 1090 }, 1091 .cpuid = { 1092 .eax = 7, 1093 .needs_ecx = true, .ecx = 0, 1094 .reg = R_EDX, 1095 }, 1096 .tcg_features = TCG_7_0_EDX_FEATURES, 1097 }, 1098 [FEAT_8000_0007_EDX] = { 1099 .type = CPUID_FEATURE_WORD, 1100 .feat_names = { 1101 NULL, NULL, NULL, NULL, 1102 NULL, NULL, NULL, NULL, 1103 "invtsc", NULL, NULL, NULL, 1104 NULL, NULL, NULL, NULL, 1105 NULL, NULL, NULL, NULL, 1106 NULL, NULL, NULL, NULL, 1107 NULL, NULL, NULL, NULL, 1108 NULL, NULL, NULL, NULL, 1109 }, 1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1111 .tcg_features = TCG_APM_FEATURES, 1112 .unmigratable_flags = CPUID_APM_INVTSC, 1113 }, 1114 [FEAT_8000_0008_EBX] = { 1115 .type = CPUID_FEATURE_WORD, 1116 .feat_names = { 1117 NULL, NULL, NULL, NULL, 1118 NULL, NULL, NULL, NULL, 1119 
NULL, "wbnoinvd", NULL, NULL, 1120 "ibpb", NULL, NULL, NULL, 1121 NULL, NULL, NULL, NULL, 1122 NULL, NULL, NULL, NULL, 1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1124 NULL, NULL, NULL, NULL, 1125 }, 1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1127 .tcg_features = 0, 1128 .unmigratable_flags = 0, 1129 }, 1130 [FEAT_XSAVE] = { 1131 .type = CPUID_FEATURE_WORD, 1132 .feat_names = { 1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 NULL, NULL, NULL, NULL, 1138 NULL, NULL, NULL, NULL, 1139 NULL, NULL, NULL, NULL, 1140 NULL, NULL, NULL, NULL, 1141 }, 1142 .cpuid = { 1143 .eax = 0xd, 1144 .needs_ecx = true, .ecx = 1, 1145 .reg = R_EAX, 1146 }, 1147 .tcg_features = TCG_XSAVE_FEATURES, 1148 }, 1149 [FEAT_6_EAX] = { 1150 .type = CPUID_FEATURE_WORD, 1151 .feat_names = { 1152 NULL, NULL, "arat", NULL, 1153 NULL, NULL, NULL, NULL, 1154 NULL, NULL, NULL, NULL, 1155 NULL, NULL, NULL, NULL, 1156 NULL, NULL, NULL, NULL, 1157 NULL, NULL, NULL, NULL, 1158 NULL, NULL, NULL, NULL, 1159 NULL, NULL, NULL, NULL, 1160 }, 1161 .cpuid = { .eax = 6, .reg = R_EAX, }, 1162 .tcg_features = TCG_6_EAX_FEATURES, 1163 }, 1164 [FEAT_XSAVE_COMP_LO] = { 1165 .type = CPUID_FEATURE_WORD, 1166 .cpuid = { 1167 .eax = 0xD, 1168 .needs_ecx = true, .ecx = 0, 1169 .reg = R_EAX, 1170 }, 1171 .tcg_features = ~0U, 1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1175 XSTATE_PKRU_MASK, 1176 }, 1177 [FEAT_XSAVE_COMP_HI] = { 1178 .type = CPUID_FEATURE_WORD, 1179 .cpuid = { 1180 .eax = 0xD, 1181 .needs_ecx = true, .ecx = 0, 1182 .reg = R_EDX, 1183 }, 1184 .tcg_features = ~0U, 1185 }, 1186 /*Below are MSR exposed features*/ 1187 [FEAT_ARCH_CAPABILITIES] = { 1188 .type = MSR_FEATURE_WORD, 1189 .feat_names = { 1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 
            "ssb-no", "mds-no", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
            /*
             * This MSR feature word is only meaningful when the
             * ARCH_CAPABILITIES bit of CPUID[EAX=7,ECX=0].EDX is set.
             */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_ARCH_CAPABILITIES
            }
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
            /*
             * This MSR feature word is only meaningful when the
             * CORE_CAPABILITY bit of CPUID[EAX=7,ECX=0].EDX is set.
             */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_CORE_CAPABILITY,
            },
        },
    },
};

/* Maps a 32-bit x86 register index to its name and QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Build one x86_reg_info_32[] entry from the register name token. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/*
 * One XSAVE state component: the CPUID feature word/bits that enable it,
 * plus its offset and size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT component number. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is
supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/*
 * Return the size of the XSAVE area needed for the components selected
 * by @mask: the largest end offset (offset + size) among the components
 * whose bit is set in @mask.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

/* True when the accelerator in use (KVM or HVF) exposes the host CPUID. */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

/*
 * Combine the two 32-bit XSAVE component feature words into a single
 * 64-bit component mask (HI word in the upper 32 bits).
 */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return
((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the name of 32-bit register @reg, or NULL if out of range. */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

/*
 * Execute the CPUID instruction on the host for leaf @function /
 * subleaf @count and store the results through the non-NULL output
 * pointers.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * On i386, save/restore all GPRs with pusha/popa and write the
     * CPUID outputs to memory instead of using register outputs.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

/*
 * Query the host CPU's vendor string and family/model/stepping via
 * CPUID leaves 0 and 1.  Any of the output pointers may be NULL.
 * @vendor must have room for CPUID_VENDOR_SZ + 1 bytes.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /*
         * Base family is EAX bits 11:8; extended family (bits 27:20) is
         * added on top.  NOTE(review): the SDM defines the extended
         * family as significant only when the base family is 0xF; this
         * adds it unconditionally, which matches hardware behavior only
         * because the extended-family field reads 0 otherwise — confirm.
         */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* Model is EAX bits 7:4, with extended model (bits 19:16) above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        /* Stepping is EAX bits 3:0 */
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM class for CPU model @cpu_model; NULL if not registered. */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/*
 * Return the CPU model name of class @cc, i.e. the class name with the
 * X86_CPU_TYPE_SUFFIX stripped.  Caller frees the returned string.
 */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* A (property name, property value) pair applied to a CPU object. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* One versioned variant of a CPU model: its number, optional alias name,
 * and the property overrides that distinguish it from version 1. */
typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    /* Versions are 1-based; 0 terminates a version list and is invalid here */
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

/*
 * Return the version list for @def, or a default single-entry "v1" list
 * when the model defines no explicit versions.
 */
static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    /* "?:" is the GNU elvis operator: def->versions if non-NULL */
    return def->versions ?: default_version_list;
}

/*
 * Cache topology advertised for the EPYC CPU model.  The file-scope
 * compound literals have static storage duration, so taking their
 * addresses here is safe.
 */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
1539 .associativity = 16, 1540 .partitions = 1, 1541 .sets = 8192, 1542 .lines_per_tag = 1, 1543 .self_init = true, 1544 .inclusive = true, 1545 .complex_indexing = true, 1546 }, 1547 }; 1548 1549 static X86CPUDefinition builtin_x86_defs[] = { 1550 { 1551 .name = "qemu64", 1552 .level = 0xd, 1553 .vendor = CPUID_VENDOR_AMD, 1554 .family = 6, 1555 .model = 6, 1556 .stepping = 3, 1557 .features[FEAT_1_EDX] = 1558 PPRO_FEATURES | 1559 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1560 CPUID_PSE36, 1561 .features[FEAT_1_ECX] = 1562 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1563 .features[FEAT_8000_0001_EDX] = 1564 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1565 .features[FEAT_8000_0001_ECX] = 1566 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1567 .xlevel = 0x8000000A, 1568 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1569 }, 1570 { 1571 .name = "phenom", 1572 .level = 5, 1573 .vendor = CPUID_VENDOR_AMD, 1574 .family = 16, 1575 .model = 2, 1576 .stepping = 3, 1577 /* Missing: CPUID_HT */ 1578 .features[FEAT_1_EDX] = 1579 PPRO_FEATURES | 1580 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1581 CPUID_PSE36 | CPUID_VME, 1582 .features[FEAT_1_ECX] = 1583 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1584 CPUID_EXT_POPCNT, 1585 .features[FEAT_8000_0001_EDX] = 1586 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1587 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1588 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1589 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1590 CPUID_EXT3_CR8LEG, 1591 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1592 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1593 .features[FEAT_8000_0001_ECX] = 1594 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1595 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1596 /* Missing: CPUID_SVM_LBRV */ 1597 .features[FEAT_SVM] = 1598 CPUID_SVM_NPT, 1599 .xlevel = 0x8000001A, 1600 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1601 }, 1602 { 1603 .name = "core2duo", 1604 .level = 10, 1605 .vendor = 
CPUID_VENDOR_INTEL, 1606 .family = 6, 1607 .model = 15, 1608 .stepping = 11, 1609 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1610 .features[FEAT_1_EDX] = 1611 PPRO_FEATURES | 1612 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1613 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1614 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1615 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1616 .features[FEAT_1_ECX] = 1617 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1618 CPUID_EXT_CX16, 1619 .features[FEAT_8000_0001_EDX] = 1620 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1621 .features[FEAT_8000_0001_ECX] = 1622 CPUID_EXT3_LAHF_LM, 1623 .xlevel = 0x80000008, 1624 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1625 }, 1626 { 1627 .name = "kvm64", 1628 .level = 0xd, 1629 .vendor = CPUID_VENDOR_INTEL, 1630 .family = 15, 1631 .model = 6, 1632 .stepping = 1, 1633 /* Missing: CPUID_HT */ 1634 .features[FEAT_1_EDX] = 1635 PPRO_FEATURES | CPUID_VME | 1636 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1637 CPUID_PSE36, 1638 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1639 .features[FEAT_1_ECX] = 1640 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1641 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1642 .features[FEAT_8000_0001_EDX] = 1643 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1644 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1645 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1646 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1647 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1648 .features[FEAT_8000_0001_ECX] = 1649 0, 1650 .xlevel = 0x80000008, 1651 .model_id = "Common KVM processor" 1652 }, 1653 { 1654 .name = "qemu32", 1655 .level = 4, 1656 .vendor = CPUID_VENDOR_INTEL, 1657 .family = 6, 1658 .model = 6, 1659 .stepping = 3, 1660 .features[FEAT_1_EDX] = 1661 PPRO_FEATURES, 1662 .features[FEAT_1_ECX] = 1663 CPUID_EXT_SSE3, 1664 .xlevel = 0x80000004, 1665 
.model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1666 }, 1667 { 1668 .name = "kvm32", 1669 .level = 5, 1670 .vendor = CPUID_VENDOR_INTEL, 1671 .family = 15, 1672 .model = 6, 1673 .stepping = 1, 1674 .features[FEAT_1_EDX] = 1675 PPRO_FEATURES | CPUID_VME | 1676 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1677 .features[FEAT_1_ECX] = 1678 CPUID_EXT_SSE3, 1679 .features[FEAT_8000_0001_ECX] = 1680 0, 1681 .xlevel = 0x80000008, 1682 .model_id = "Common 32-bit KVM processor" 1683 }, 1684 { 1685 .name = "coreduo", 1686 .level = 10, 1687 .vendor = CPUID_VENDOR_INTEL, 1688 .family = 6, 1689 .model = 14, 1690 .stepping = 8, 1691 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1692 .features[FEAT_1_EDX] = 1693 PPRO_FEATURES | CPUID_VME | 1694 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1695 CPUID_SS, 1696 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1697 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1698 .features[FEAT_1_ECX] = 1699 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1700 .features[FEAT_8000_0001_EDX] = 1701 CPUID_EXT2_NX, 1702 .xlevel = 0x80000008, 1703 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1704 }, 1705 { 1706 .name = "486", 1707 .level = 1, 1708 .vendor = CPUID_VENDOR_INTEL, 1709 .family = 4, 1710 .model = 8, 1711 .stepping = 0, 1712 .features[FEAT_1_EDX] = 1713 I486_FEATURES, 1714 .xlevel = 0, 1715 .model_id = "", 1716 }, 1717 { 1718 .name = "pentium", 1719 .level = 1, 1720 .vendor = CPUID_VENDOR_INTEL, 1721 .family = 5, 1722 .model = 4, 1723 .stepping = 3, 1724 .features[FEAT_1_EDX] = 1725 PENTIUM_FEATURES, 1726 .xlevel = 0, 1727 .model_id = "", 1728 }, 1729 { 1730 .name = "pentium2", 1731 .level = 2, 1732 .vendor = CPUID_VENDOR_INTEL, 1733 .family = 6, 1734 .model = 5, 1735 .stepping = 2, 1736 .features[FEAT_1_EDX] = 1737 PENTIUM2_FEATURES, 1738 .xlevel = 0, 1739 .model_id = "", 1740 }, 1741 { 1742 .name = "pentium3", 1743 .level = 3, 1744 .vendor = CPUID_VENDOR_INTEL, 1745 .family = 6, 1746 .model = 7, 1747 .stepping 
= 3, 1748 .features[FEAT_1_EDX] = 1749 PENTIUM3_FEATURES, 1750 .xlevel = 0, 1751 .model_id = "", 1752 }, 1753 { 1754 .name = "athlon", 1755 .level = 2, 1756 .vendor = CPUID_VENDOR_AMD, 1757 .family = 6, 1758 .model = 2, 1759 .stepping = 3, 1760 .features[FEAT_1_EDX] = 1761 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1762 CPUID_MCA, 1763 .features[FEAT_8000_0001_EDX] = 1764 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1765 .xlevel = 0x80000008, 1766 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1767 }, 1768 { 1769 .name = "n270", 1770 .level = 10, 1771 .vendor = CPUID_VENDOR_INTEL, 1772 .family = 6, 1773 .model = 28, 1774 .stepping = 2, 1775 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1776 .features[FEAT_1_EDX] = 1777 PPRO_FEATURES | 1778 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1779 CPUID_ACPI | CPUID_SS, 1780 /* Some CPUs got no CPUID_SEP */ 1781 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1782 * CPUID_EXT_XTPR */ 1783 .features[FEAT_1_ECX] = 1784 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1785 CPUID_EXT_MOVBE, 1786 .features[FEAT_8000_0001_EDX] = 1787 CPUID_EXT2_NX, 1788 .features[FEAT_8000_0001_ECX] = 1789 CPUID_EXT3_LAHF_LM, 1790 .xlevel = 0x80000008, 1791 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1792 }, 1793 { 1794 .name = "Conroe", 1795 .level = 10, 1796 .vendor = CPUID_VENDOR_INTEL, 1797 .family = 6, 1798 .model = 15, 1799 .stepping = 3, 1800 .features[FEAT_1_EDX] = 1801 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1802 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1803 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1804 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1805 CPUID_DE | CPUID_FP87, 1806 .features[FEAT_1_ECX] = 1807 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1808 .features[FEAT_8000_0001_EDX] = 1809 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1810 .features[FEAT_8000_0001_ECX] = 1811 
CPUID_EXT3_LAHF_LM, 1812 .xlevel = 0x80000008, 1813 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1814 }, 1815 { 1816 .name = "Penryn", 1817 .level = 10, 1818 .vendor = CPUID_VENDOR_INTEL, 1819 .family = 6, 1820 .model = 23, 1821 .stepping = 3, 1822 .features[FEAT_1_EDX] = 1823 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1824 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1825 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1826 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1827 CPUID_DE | CPUID_FP87, 1828 .features[FEAT_1_ECX] = 1829 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1830 CPUID_EXT_SSE3, 1831 .features[FEAT_8000_0001_EDX] = 1832 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1833 .features[FEAT_8000_0001_ECX] = 1834 CPUID_EXT3_LAHF_LM, 1835 .xlevel = 0x80000008, 1836 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1837 }, 1838 { 1839 .name = "Nehalem", 1840 .level = 11, 1841 .vendor = CPUID_VENDOR_INTEL, 1842 .family = 6, 1843 .model = 26, 1844 .stepping = 3, 1845 .features[FEAT_1_EDX] = 1846 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1847 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1848 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1849 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1850 CPUID_DE | CPUID_FP87, 1851 .features[FEAT_1_ECX] = 1852 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1853 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1854 .features[FEAT_8000_0001_EDX] = 1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1856 .features[FEAT_8000_0001_ECX] = 1857 CPUID_EXT3_LAHF_LM, 1858 .xlevel = 0x80000008, 1859 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1860 .versions = (X86CPUVersionDefinition[]) { 1861 { .version = 1 }, 1862 { 1863 .version = 2, 1864 .alias = "Nehalem-IBRS", 1865 .props = (PropValue[]) { 1866 { "spec-ctrl", "on" }, 1867 
{ "model-id", 1868 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 1869 { /* end of list */ } 1870 } 1871 }, 1872 { /* end of list */ } 1873 } 1874 }, 1875 { 1876 .name = "Westmere", 1877 .level = 11, 1878 .vendor = CPUID_VENDOR_INTEL, 1879 .family = 6, 1880 .model = 44, 1881 .stepping = 1, 1882 .features[FEAT_1_EDX] = 1883 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1884 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1885 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1886 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1887 CPUID_DE | CPUID_FP87, 1888 .features[FEAT_1_ECX] = 1889 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1890 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1891 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1892 .features[FEAT_8000_0001_EDX] = 1893 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1894 .features[FEAT_8000_0001_ECX] = 1895 CPUID_EXT3_LAHF_LM, 1896 .features[FEAT_6_EAX] = 1897 CPUID_6_EAX_ARAT, 1898 .xlevel = 0x80000008, 1899 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1900 .versions = (X86CPUVersionDefinition[]) { 1901 { .version = 1 }, 1902 { 1903 .version = 2, 1904 .alias = "Westmere-IBRS", 1905 .props = (PropValue[]) { 1906 { "spec-ctrl", "on" }, 1907 { "model-id", 1908 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 1909 { /* end of list */ } 1910 } 1911 }, 1912 { /* end of list */ } 1913 } 1914 }, 1915 { 1916 .name = "SandyBridge", 1917 .level = 0xd, 1918 .vendor = CPUID_VENDOR_INTEL, 1919 .family = 6, 1920 .model = 42, 1921 .stepping = 1, 1922 .features[FEAT_1_EDX] = 1923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1927 CPUID_DE | CPUID_FP87, 1928 .features[FEAT_1_ECX] = 1929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | 
CPUID_EXT_AES | 1930 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1931 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1932 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1933 CPUID_EXT_SSE3, 1934 .features[FEAT_8000_0001_EDX] = 1935 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1936 CPUID_EXT2_SYSCALL, 1937 .features[FEAT_8000_0001_ECX] = 1938 CPUID_EXT3_LAHF_LM, 1939 .features[FEAT_XSAVE] = 1940 CPUID_XSAVE_XSAVEOPT, 1941 .features[FEAT_6_EAX] = 1942 CPUID_6_EAX_ARAT, 1943 .xlevel = 0x80000008, 1944 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1945 .versions = (X86CPUVersionDefinition[]) { 1946 { .version = 1 }, 1947 { 1948 .version = 2, 1949 .alias = "SandyBridge-IBRS", 1950 .props = (PropValue[]) { 1951 { "spec-ctrl", "on" }, 1952 { "model-id", 1953 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 1954 { /* end of list */ } 1955 } 1956 }, 1957 { /* end of list */ } 1958 } 1959 }, 1960 { 1961 .name = "IvyBridge", 1962 .level = 0xd, 1963 .vendor = CPUID_VENDOR_INTEL, 1964 .family = 6, 1965 .model = 58, 1966 .stepping = 9, 1967 .features[FEAT_1_EDX] = 1968 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1969 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1970 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1971 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1972 CPUID_DE | CPUID_FP87, 1973 .features[FEAT_1_ECX] = 1974 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1975 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1976 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1977 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1978 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1979 .features[FEAT_7_0_EBX] = 1980 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1981 CPUID_7_0_EBX_ERMS, 1982 .features[FEAT_8000_0001_EDX] = 1983 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1984 CPUID_EXT2_SYSCALL, 1985 .features[FEAT_8000_0001_ECX] = 1986 
CPUID_EXT3_LAHF_LM, 1987 .features[FEAT_XSAVE] = 1988 CPUID_XSAVE_XSAVEOPT, 1989 .features[FEAT_6_EAX] = 1990 CPUID_6_EAX_ARAT, 1991 .xlevel = 0x80000008, 1992 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1993 .versions = (X86CPUVersionDefinition[]) { 1994 { .version = 1 }, 1995 { 1996 .version = 2, 1997 .alias = "IvyBridge-IBRS", 1998 .props = (PropValue[]) { 1999 { "spec-ctrl", "on" }, 2000 { "model-id", 2001 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2002 { /* end of list */ } 2003 } 2004 }, 2005 { /* end of list */ } 2006 } 2007 }, 2008 { 2009 .name = "Haswell", 2010 .level = 0xd, 2011 .vendor = CPUID_VENDOR_INTEL, 2012 .family = 6, 2013 .model = 60, 2014 .stepping = 4, 2015 .features[FEAT_1_EDX] = 2016 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2017 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2018 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2019 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2020 CPUID_DE | CPUID_FP87, 2021 .features[FEAT_1_ECX] = 2022 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2023 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2024 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2025 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2026 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2027 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2028 .features[FEAT_8000_0001_EDX] = 2029 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2030 CPUID_EXT2_SYSCALL, 2031 .features[FEAT_8000_0001_ECX] = 2032 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2033 .features[FEAT_7_0_EBX] = 2034 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2035 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2036 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2037 CPUID_7_0_EBX_RTM, 2038 .features[FEAT_XSAVE] = 2039 CPUID_XSAVE_XSAVEOPT, 2040 .features[FEAT_6_EAX] = 2041 CPUID_6_EAX_ARAT, 2042 .xlevel = 0x80000008, 2043 .model_id = "Intel Core 
Processor (Haswell)", 2044 .versions = (X86CPUVersionDefinition[]) { 2045 { .version = 1 }, 2046 { 2047 .version = 2, 2048 .alias = "Haswell-noTSX", 2049 .props = (PropValue[]) { 2050 { "hle", "off" }, 2051 { "rtm", "off" }, 2052 { "stepping", "1" }, 2053 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2054 { /* end of list */ } 2055 }, 2056 }, 2057 { 2058 .version = 3, 2059 .alias = "Haswell-IBRS", 2060 .props = (PropValue[]) { 2061 /* Restore TSX features removed by -v2 above */ 2062 { "hle", "on" }, 2063 { "rtm", "on" }, 2064 /* 2065 * Haswell and Haswell-IBRS had stepping=4 in 2066 * QEMU 4.0 and older 2067 */ 2068 { "stepping", "4" }, 2069 { "spec-ctrl", "on" }, 2070 { "model-id", 2071 "Intel Core Processor (Haswell, IBRS)" }, 2072 { /* end of list */ } 2073 } 2074 }, 2075 { 2076 .version = 4, 2077 .alias = "Haswell-noTSX-IBRS", 2078 .props = (PropValue[]) { 2079 { "hle", "off" }, 2080 { "rtm", "off" }, 2081 /* spec-ctrl was already enabled by -v3 above */ 2082 { "stepping", "1" }, 2083 { "model-id", 2084 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2085 { /* end of list */ } 2086 } 2087 }, 2088 { /* end of list */ } 2089 } 2090 }, 2091 { 2092 .name = "Broadwell", 2093 .level = 0xd, 2094 .vendor = CPUID_VENDOR_INTEL, 2095 .family = 6, 2096 .model = 61, 2097 .stepping = 2, 2098 .features[FEAT_1_EDX] = 2099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2103 CPUID_DE | CPUID_FP87, 2104 .features[FEAT_1_ECX] = 2105 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2106 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2107 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2108 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2109 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2110 CPUID_EXT_PCID | CPUID_EXT_F16C | 
CPUID_EXT_RDRAND, 2111 .features[FEAT_8000_0001_EDX] = 2112 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2113 CPUID_EXT2_SYSCALL, 2114 .features[FEAT_8000_0001_ECX] = 2115 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2116 .features[FEAT_7_0_EBX] = 2117 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2118 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2119 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2120 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2121 CPUID_7_0_EBX_SMAP, 2122 .features[FEAT_XSAVE] = 2123 CPUID_XSAVE_XSAVEOPT, 2124 .features[FEAT_6_EAX] = 2125 CPUID_6_EAX_ARAT, 2126 .xlevel = 0x80000008, 2127 .model_id = "Intel Core Processor (Broadwell)", 2128 .versions = (X86CPUVersionDefinition[]) { 2129 { .version = 1 }, 2130 { 2131 .version = 2, 2132 .alias = "Broadwell-noTSX", 2133 .props = (PropValue[]) { 2134 { "hle", "off" }, 2135 { "rtm", "off" }, 2136 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2137 { /* end of list */ } 2138 }, 2139 }, 2140 { 2141 .version = 3, 2142 .alias = "Broadwell-IBRS", 2143 .props = (PropValue[]) { 2144 /* Restore TSX features removed by -v2 above */ 2145 { "hle", "on" }, 2146 { "rtm", "on" }, 2147 { "spec-ctrl", "on" }, 2148 { "model-id", 2149 "Intel Core Processor (Broadwell, IBRS)" }, 2150 { /* end of list */ } 2151 } 2152 }, 2153 { 2154 .version = 4, 2155 .alias = "Broadwell-noTSX-IBRS", 2156 .props = (PropValue[]) { 2157 { "hle", "off" }, 2158 { "rtm", "off" }, 2159 /* spec-ctrl was already enabled by -v3 above */ 2160 { "model-id", 2161 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2162 { /* end of list */ } 2163 } 2164 }, 2165 { /* end of list */ } 2166 } 2167 }, 2168 { 2169 .name = "Skylake-Client", 2170 .level = 0xd, 2171 .vendor = CPUID_VENDOR_INTEL, 2172 .family = 6, 2173 .model = 94, 2174 .stepping = 3, 2175 .features[FEAT_1_EDX] = 2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2177 CPUID_CLFLUSH | 
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2180 CPUID_DE | CPUID_FP87, 2181 .features[FEAT_1_ECX] = 2182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2183 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2185 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2186 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2187 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2188 .features[FEAT_8000_0001_EDX] = 2189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2190 CPUID_EXT2_SYSCALL, 2191 .features[FEAT_8000_0001_ECX] = 2192 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2193 .features[FEAT_7_0_EBX] = 2194 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2195 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2196 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2197 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2198 CPUID_7_0_EBX_SMAP, 2199 /* Missing: XSAVES (not supported by some Linux versions, 2200 * including v4.1 to v4.12). 2201 * KVM doesn't yet expose any XSAVES state save component, 2202 * and the only one defined in Skylake (processor tracing) 2203 * probably will block migration anyway. 
2204 */ 2205 .features[FEAT_XSAVE] = 2206 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2207 CPUID_XSAVE_XGETBV1, 2208 .features[FEAT_6_EAX] = 2209 CPUID_6_EAX_ARAT, 2210 .xlevel = 0x80000008, 2211 .model_id = "Intel Core Processor (Skylake)", 2212 .versions = (X86CPUVersionDefinition[]) { 2213 { .version = 1 }, 2214 { 2215 .version = 2, 2216 .alias = "Skylake-Client-IBRS", 2217 .props = (PropValue[]) { 2218 { "spec-ctrl", "on" }, 2219 { "model-id", 2220 "Intel Core Processor (Skylake, IBRS)" }, 2221 { /* end of list */ } 2222 } 2223 }, 2224 { /* end of list */ } 2225 } 2226 }, 2227 { 2228 .name = "Skylake-Server", 2229 .level = 0xd, 2230 .vendor = CPUID_VENDOR_INTEL, 2231 .family = 6, 2232 .model = 85, 2233 .stepping = 4, 2234 .features[FEAT_1_EDX] = 2235 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2236 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2237 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2238 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2239 CPUID_DE | CPUID_FP87, 2240 .features[FEAT_1_ECX] = 2241 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2242 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2243 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2244 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2245 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2246 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2247 .features[FEAT_8000_0001_EDX] = 2248 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2249 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2250 .features[FEAT_8000_0001_ECX] = 2251 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2252 .features[FEAT_7_0_EBX] = 2253 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2254 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2255 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2256 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2257 CPUID_7_0_EBX_SMAP | 
CPUID_7_0_EBX_CLWB | 2258 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2259 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2260 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2261 .features[FEAT_7_0_ECX] = 2262 CPUID_7_0_ECX_PKU, 2263 /* Missing: XSAVES (not supported by some Linux versions, 2264 * including v4.1 to v4.12). 2265 * KVM doesn't yet expose any XSAVES state save component, 2266 * and the only one defined in Skylake (processor tracing) 2267 * probably will block migration anyway. 2268 */ 2269 .features[FEAT_XSAVE] = 2270 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2271 CPUID_XSAVE_XGETBV1, 2272 .features[FEAT_6_EAX] = 2273 CPUID_6_EAX_ARAT, 2274 .xlevel = 0x80000008, 2275 .model_id = "Intel Xeon Processor (Skylake)", 2276 .versions = (X86CPUVersionDefinition[]) { 2277 { .version = 1 }, 2278 { 2279 .version = 2, 2280 .alias = "Skylake-Server-IBRS", 2281 .props = (PropValue[]) { 2282 /* clflushopt was not added to Skylake-Server-IBRS */ 2283 /* TODO: add -v3 including clflushopt */ 2284 { "clflushopt", "off" }, 2285 { "spec-ctrl", "on" }, 2286 { "model-id", 2287 "Intel Xeon Processor (Skylake, IBRS)" }, 2288 { /* end of list */ } 2289 } 2290 }, 2291 { /* end of list */ } 2292 } 2293 }, 2294 { 2295 .name = "Cascadelake-Server", 2296 .level = 0xd, 2297 .vendor = CPUID_VENDOR_INTEL, 2298 .family = 6, 2299 .model = 85, 2300 .stepping = 6, 2301 .features[FEAT_1_EDX] = 2302 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2303 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2304 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2305 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2306 CPUID_DE | CPUID_FP87, 2307 .features[FEAT_1_ECX] = 2308 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2309 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2310 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2311 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2312 CPUID_EXT_TSC_DEADLINE_TIMER | 
CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2313 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2314 .features[FEAT_8000_0001_EDX] = 2315 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2316 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2317 .features[FEAT_8000_0001_ECX] = 2318 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2319 .features[FEAT_7_0_EBX] = 2320 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2321 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2322 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2323 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2324 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2325 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2326 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2327 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2328 .features[FEAT_7_0_ECX] = 2329 CPUID_7_0_ECX_PKU | 2330 CPUID_7_0_ECX_AVX512VNNI, 2331 .features[FEAT_7_0_EDX] = 2332 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2333 /* Missing: XSAVES (not supported by some Linux versions, 2334 * including v4.1 to v4.12). 2335 * KVM doesn't yet expose any XSAVES state save component, 2336 * and the only one defined in Skylake (processor tracing) 2337 * probably will block migration anyway. 
2338 */ 2339 .features[FEAT_XSAVE] = 2340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2341 CPUID_XSAVE_XGETBV1, 2342 .features[FEAT_6_EAX] = 2343 CPUID_6_EAX_ARAT, 2344 .xlevel = 0x80000008, 2345 .model_id = "Intel Xeon Processor (Cascadelake)", 2346 .versions = (X86CPUVersionDefinition[]) { 2347 { .version = 1 }, 2348 { .version = 2, 2349 .props = (PropValue[]) { 2350 { "arch-capabilities", "on" }, 2351 { "rdctl-no", "on" }, 2352 { "ibrs-all", "on" }, 2353 { "skip-l1dfl-vmentry", "on" }, 2354 { "mds-no", "on" }, 2355 { /* end of list */ } 2356 }, 2357 }, 2358 { /* end of list */ } 2359 } 2360 }, 2361 { 2362 .name = "Icelake-Client", 2363 .level = 0xd, 2364 .vendor = CPUID_VENDOR_INTEL, 2365 .family = 6, 2366 .model = 126, 2367 .stepping = 0, 2368 .features[FEAT_1_EDX] = 2369 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2370 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2371 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2372 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2373 CPUID_DE | CPUID_FP87, 2374 .features[FEAT_1_ECX] = 2375 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2376 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2377 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2378 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2379 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2380 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2381 .features[FEAT_8000_0001_EDX] = 2382 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2383 CPUID_EXT2_SYSCALL, 2384 .features[FEAT_8000_0001_ECX] = 2385 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2386 .features[FEAT_8000_0008_EBX] = 2387 CPUID_8000_0008_EBX_WBNOINVD, 2388 .features[FEAT_7_0_EBX] = 2389 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2390 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2391 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2392 CPUID_7_0_EBX_RTM | 
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2393 CPUID_7_0_EBX_SMAP, 2394 .features[FEAT_7_0_ECX] = 2395 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2396 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2397 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2398 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2399 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2400 .features[FEAT_7_0_EDX] = 2401 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2402 /* Missing: XSAVES (not supported by some Linux versions, 2403 * including v4.1 to v4.12). 2404 * KVM doesn't yet expose any XSAVES state save component, 2405 * and the only one defined in Skylake (processor tracing) 2406 * probably will block migration anyway. 2407 */ 2408 .features[FEAT_XSAVE] = 2409 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2410 CPUID_XSAVE_XGETBV1, 2411 .features[FEAT_6_EAX] = 2412 CPUID_6_EAX_ARAT, 2413 .xlevel = 0x80000008, 2414 .model_id = "Intel Core Processor (Icelake)", 2415 }, 2416 { 2417 .name = "Icelake-Server", 2418 .level = 0xd, 2419 .vendor = CPUID_VENDOR_INTEL, 2420 .family = 6, 2421 .model = 134, 2422 .stepping = 0, 2423 .features[FEAT_1_EDX] = 2424 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2425 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2426 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2427 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2428 CPUID_DE | CPUID_FP87, 2429 .features[FEAT_1_ECX] = 2430 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2431 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2432 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2433 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2434 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2435 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2436 .features[FEAT_8000_0001_EDX] = 2437 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2438 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2439 .features[FEAT_8000_0001_ECX] 
= 2440 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2441 .features[FEAT_8000_0008_EBX] = 2442 CPUID_8000_0008_EBX_WBNOINVD, 2443 .features[FEAT_7_0_EBX] = 2444 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2445 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2446 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2447 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2448 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2449 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2450 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2451 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2452 .features[FEAT_7_0_ECX] = 2453 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2454 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2455 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2456 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2457 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 2458 .features[FEAT_7_0_EDX] = 2459 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2460 /* Missing: XSAVES (not supported by some Linux versions, 2461 * including v4.1 to v4.12). 2462 * KVM doesn't yet expose any XSAVES state save component, 2463 * and the only one defined in Skylake (processor tracing) 2464 * probably will block migration anyway. 
2465 */ 2466 .features[FEAT_XSAVE] = 2467 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2468 CPUID_XSAVE_XGETBV1, 2469 .features[FEAT_6_EAX] = 2470 CPUID_6_EAX_ARAT, 2471 .xlevel = 0x80000008, 2472 .model_id = "Intel Xeon Processor (Icelake)", 2473 }, 2474 { 2475 .name = "Snowridge", 2476 .level = 27, 2477 .vendor = CPUID_VENDOR_INTEL, 2478 .family = 6, 2479 .model = 134, 2480 .stepping = 1, 2481 .features[FEAT_1_EDX] = 2482 /* missing: CPUID_PN CPUID_IA64 */ 2483 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2484 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 2485 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 2486 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 2487 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 2488 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 2489 CPUID_MMX | 2490 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 2491 .features[FEAT_1_ECX] = 2492 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 2493 CPUID_EXT_SSSE3 | 2494 CPUID_EXT_CX16 | 2495 CPUID_EXT_SSE41 | 2496 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 2497 CPUID_EXT_POPCNT | 2498 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 2499 CPUID_EXT_RDRAND, 2500 .features[FEAT_8000_0001_EDX] = 2501 CPUID_EXT2_SYSCALL | 2502 CPUID_EXT2_NX | 2503 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2504 CPUID_EXT2_LM, 2505 .features[FEAT_8000_0001_ECX] = 2506 CPUID_EXT3_LAHF_LM | 2507 CPUID_EXT3_3DNOWPREFETCH, 2508 .features[FEAT_7_0_EBX] = 2509 CPUID_7_0_EBX_FSGSBASE | 2510 CPUID_7_0_EBX_SMEP | 2511 CPUID_7_0_EBX_ERMS | 2512 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 2513 CPUID_7_0_EBX_RDSEED | 2514 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2515 CPUID_7_0_EBX_CLWB | 2516 CPUID_7_0_EBX_SHA_NI, 2517 .features[FEAT_7_0_ECX] = 2518 CPUID_7_0_ECX_UMIP | 2519 /* missing bit 5 */ 2520 CPUID_7_0_ECX_GFNI | 2521 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 2522 CPUID_7_0_ECX_MOVDIR64B, 2523 .features[FEAT_7_0_EDX] = 2524 CPUID_7_0_EDX_SPEC_CTRL | 2525 
CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 2526 CPUID_7_0_EDX_CORE_CAPABILITY, 2527 .features[FEAT_CORE_CAPABILITY] = 2528 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 2529 /* 2530 * Missing: XSAVES (not supported by some Linux versions, 2531 * including v4.1 to v4.12). 2532 * KVM doesn't yet expose any XSAVES state save component, 2533 * and the only one defined in Skylake (processor tracing) 2534 * probably will block migration anyway. 2535 */ 2536 .features[FEAT_XSAVE] = 2537 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2538 CPUID_XSAVE_XGETBV1, 2539 .features[FEAT_6_EAX] = 2540 CPUID_6_EAX_ARAT, 2541 .xlevel = 0x80000008, 2542 .model_id = "Intel Atom Processor (SnowRidge)", 2543 }, 2544 { 2545 .name = "KnightsMill", 2546 .level = 0xd, 2547 .vendor = CPUID_VENDOR_INTEL, 2548 .family = 6, 2549 .model = 133, 2550 .stepping = 0, 2551 .features[FEAT_1_EDX] = 2552 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2553 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2554 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2555 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2556 CPUID_PSE | CPUID_DE | CPUID_FP87, 2557 .features[FEAT_1_ECX] = 2558 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2559 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2560 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2561 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2562 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2563 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2564 .features[FEAT_8000_0001_EDX] = 2565 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2566 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2567 .features[FEAT_8000_0001_ECX] = 2568 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2569 .features[FEAT_7_0_EBX] = 2570 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2571 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2572 CPUID_7_0_EBX_RDSEED | 
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2573 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2574 CPUID_7_0_EBX_AVX512ER, 2575 .features[FEAT_7_0_ECX] = 2576 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2577 .features[FEAT_7_0_EDX] = 2578 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2579 .features[FEAT_XSAVE] = 2580 CPUID_XSAVE_XSAVEOPT, 2581 .features[FEAT_6_EAX] = 2582 CPUID_6_EAX_ARAT, 2583 .xlevel = 0x80000008, 2584 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2585 }, 2586 { 2587 .name = "Opteron_G1", 2588 .level = 5, 2589 .vendor = CPUID_VENDOR_AMD, 2590 .family = 15, 2591 .model = 6, 2592 .stepping = 1, 2593 .features[FEAT_1_EDX] = 2594 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2595 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2596 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2597 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2598 CPUID_DE | CPUID_FP87, 2599 .features[FEAT_1_ECX] = 2600 CPUID_EXT_SSE3, 2601 .features[FEAT_8000_0001_EDX] = 2602 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2603 .xlevel = 0x80000008, 2604 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2605 }, 2606 { 2607 .name = "Opteron_G2", 2608 .level = 5, 2609 .vendor = CPUID_VENDOR_AMD, 2610 .family = 15, 2611 .model = 6, 2612 .stepping = 1, 2613 .features[FEAT_1_EDX] = 2614 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2615 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2616 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2617 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2618 CPUID_DE | CPUID_FP87, 2619 .features[FEAT_1_ECX] = 2620 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2621 .features[FEAT_8000_0001_EDX] = 2622 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2623 .features[FEAT_8000_0001_ECX] = 2624 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2625 .xlevel = 0x80000008, 2626 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2627 
}, 2628 { 2629 .name = "Opteron_G3", 2630 .level = 5, 2631 .vendor = CPUID_VENDOR_AMD, 2632 .family = 16, 2633 .model = 2, 2634 .stepping = 3, 2635 .features[FEAT_1_EDX] = 2636 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2637 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2638 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2639 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2640 CPUID_DE | CPUID_FP87, 2641 .features[FEAT_1_ECX] = 2642 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2643 CPUID_EXT_SSE3, 2644 .features[FEAT_8000_0001_EDX] = 2645 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 2646 CPUID_EXT2_RDTSCP, 2647 .features[FEAT_8000_0001_ECX] = 2648 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2649 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2650 .xlevel = 0x80000008, 2651 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2652 }, 2653 { 2654 .name = "Opteron_G4", 2655 .level = 0xd, 2656 .vendor = CPUID_VENDOR_AMD, 2657 .family = 21, 2658 .model = 1, 2659 .stepping = 2, 2660 .features[FEAT_1_EDX] = 2661 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2662 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2663 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2664 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2665 CPUID_DE | CPUID_FP87, 2666 .features[FEAT_1_ECX] = 2667 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2668 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2669 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2670 CPUID_EXT_SSE3, 2671 .features[FEAT_8000_0001_EDX] = 2672 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2673 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2674 .features[FEAT_8000_0001_ECX] = 2675 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2676 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2677 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2678 CPUID_EXT3_LAHF_LM, 2679 
.features[FEAT_SVM] = 2680 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2681 /* no xsaveopt! */ 2682 .xlevel = 0x8000001A, 2683 .model_id = "AMD Opteron 62xx class CPU", 2684 }, 2685 { 2686 .name = "Opteron_G5", 2687 .level = 0xd, 2688 .vendor = CPUID_VENDOR_AMD, 2689 .family = 21, 2690 .model = 2, 2691 .stepping = 0, 2692 .features[FEAT_1_EDX] = 2693 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2694 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2695 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2696 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2697 CPUID_DE | CPUID_FP87, 2698 .features[FEAT_1_ECX] = 2699 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2700 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2701 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2702 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2703 .features[FEAT_8000_0001_EDX] = 2704 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2705 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2706 .features[FEAT_8000_0001_ECX] = 2707 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2708 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2709 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2710 CPUID_EXT3_LAHF_LM, 2711 .features[FEAT_SVM] = 2712 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2713 /* no xsaveopt! 
*/ 2714 .xlevel = 0x8000001A, 2715 .model_id = "AMD Opteron 63xx class CPU", 2716 }, 2717 { 2718 .name = "EPYC", 2719 .level = 0xd, 2720 .vendor = CPUID_VENDOR_AMD, 2721 .family = 23, 2722 .model = 1, 2723 .stepping = 2, 2724 .features[FEAT_1_EDX] = 2725 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2726 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2727 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2728 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2729 CPUID_VME | CPUID_FP87, 2730 .features[FEAT_1_ECX] = 2731 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2732 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2733 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2734 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2735 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2736 .features[FEAT_8000_0001_EDX] = 2737 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2738 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2739 CPUID_EXT2_SYSCALL, 2740 .features[FEAT_8000_0001_ECX] = 2741 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2742 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2743 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2744 CPUID_EXT3_TOPOEXT, 2745 .features[FEAT_7_0_EBX] = 2746 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2747 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2748 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2749 CPUID_7_0_EBX_SHA_NI, 2750 /* Missing: XSAVES (not supported by some Linux versions, 2751 * including v4.1 to v4.12). 2752 * KVM doesn't yet expose any XSAVES state save component. 
2753 */ 2754 .features[FEAT_XSAVE] = 2755 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2756 CPUID_XSAVE_XGETBV1, 2757 .features[FEAT_6_EAX] = 2758 CPUID_6_EAX_ARAT, 2759 .features[FEAT_SVM] = 2760 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2761 .xlevel = 0x8000001E, 2762 .model_id = "AMD EPYC Processor", 2763 .cache_info = &epyc_cache_info, 2764 .versions = (X86CPUVersionDefinition[]) { 2765 { .version = 1 }, 2766 { 2767 .version = 2, 2768 .alias = "EPYC-IBPB", 2769 .props = (PropValue[]) { 2770 { "ibpb", "on" }, 2771 { "model-id", 2772 "AMD EPYC Processor (with IBPB)" }, 2773 { /* end of list */ } 2774 } 2775 }, 2776 { /* end of list */ } 2777 } 2778 }, 2779 { 2780 .name = "Dhyana", 2781 .level = 0xd, 2782 .vendor = CPUID_VENDOR_HYGON, 2783 .family = 24, 2784 .model = 0, 2785 .stepping = 1, 2786 .features[FEAT_1_EDX] = 2787 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2788 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2789 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2790 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2791 CPUID_VME | CPUID_FP87, 2792 .features[FEAT_1_ECX] = 2793 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2794 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 2795 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2796 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2797 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 2798 .features[FEAT_8000_0001_EDX] = 2799 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2800 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2801 CPUID_EXT2_SYSCALL, 2802 .features[FEAT_8000_0001_ECX] = 2803 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2804 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2805 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2806 CPUID_EXT3_TOPOEXT, 2807 .features[FEAT_8000_0008_EBX] = 2808 CPUID_8000_0008_EBX_IBPB, 2809 .features[FEAT_7_0_EBX] = 2810 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 
                                                         CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .features[FEAT_SVM] =
            CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
        .xlevel = 0x8000001E,
        .model_id = "Hygon Dhyana Processor",
        /* Dhyana shares the EPYC cache topology (same table as the EPYC
         * model above). */
        .cache_info = &epyc_cache_info,
    },
};

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NOTE: this table is deliberately mutable — x86_cpu_change_kvm_default()
 * rewrites the value of an existing entry in place.  The list is
 * terminated by a { NULL, NULL } sentinel.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};


/* Version applied to CPU models that use CPU_VERSION_AUTO; resolved by
 * x86_cpu_model_resolve_version(). */
X86CPUVersion default_cpu_version = CPU_VERSION_LATEST;

/* Set the version used for CPU models whose own version is
 * CPU_VERSION_AUTO.  @version must itself be a concrete version (or
 * CPU_VERSION_LATEST), never CPU_VERSION_AUTO. */
void x86_cpu_set_default_version(X86CPUVersion version)
{
    /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
    assert(version != CPU_VERSION_AUTO);
    default_cpu_version = version;
}

/* Return the highest version number defined for @model, by walking its
 * version list to the zero-valued terminator.  Returns 0 when the model
 * defines no versions. */
static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
{
    int v = 0;
    const X86CPUVersionDefinition *vdef =
        x86_cpu_def_get_versions(model->cpudef);
    while (vdef->version) {
        v = vdef->version;
        vdef++;
    }
    return v;
}

/* Return the actual version
being used for a specific CPU model */
static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
{
    X86CPUVersion v = model->version;
    /* AUTO defers to the machine-type-selected default ... */
    if (v == CPU_VERSION_AUTO) {
        v = default_cpu_version;
    }
    /* ... which may itself be LATEST; resolve that to a concrete number. */
    if (v == CPU_VERSION_LATEST) {
        return x86_cpu_model_last_version(model);
    }
    return v;
}

/* Override the value of an existing entry in kvm_default_props.
 *
 * @prop must already be present in the table; @value must outlive the
 * table (the pointer is stored, not copied).
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     * (If the loop ran to the sentinel, pv->prop is NULL here.)
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/* Return true if the host's KVM supports LMCE (local machine check).
 * Without CONFIG_KVM, mce_cap stays 0 and this always returns false. */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

/* 3 CPUID leaves x 4 registers x 4 bytes each (see cpu_x86_fill_model_id) */
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
2932 */ 2933 static int cpu_x86_fill_model_id(char *str) 2934 { 2935 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 2936 int i; 2937 2938 for (i = 0; i < 3; i++) { 2939 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 2940 memcpy(str + i * 16 + 0, &eax, 4); 2941 memcpy(str + i * 16 + 4, &ebx, 4); 2942 memcpy(str + i * 16 + 8, &ecx, 4); 2943 memcpy(str + i * 16 + 12, &edx, 4); 2944 } 2945 return 0; 2946 } 2947 2948 static Property max_x86_cpu_properties[] = { 2949 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 2950 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 2951 DEFINE_PROP_END_OF_LIST() 2952 }; 2953 2954 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 2955 { 2956 DeviceClass *dc = DEVICE_CLASS(oc); 2957 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2958 2959 xcc->ordering = 9; 2960 2961 xcc->model_description = 2962 "Enables all features supported by the accelerator in the current host"; 2963 2964 dc->props = max_x86_cpu_properties; 2965 } 2966 2967 static void max_x86_cpu_initfn(Object *obj) 2968 { 2969 X86CPU *cpu = X86_CPU(obj); 2970 CPUX86State *env = &cpu->env; 2971 KVMState *s = kvm_state; 2972 2973 /* We can't fill the features array here because we don't know yet if 2974 * "migratable" is true or false. 
2975 */ 2976 cpu->max_features = true; 2977 2978 if (accel_uses_host_cpuid()) { 2979 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 2980 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 2981 int family, model, stepping; 2982 2983 host_vendor_fms(vendor, &family, &model, &stepping); 2984 cpu_x86_fill_model_id(model_id); 2985 2986 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 2987 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 2988 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 2989 object_property_set_int(OBJECT(cpu), stepping, "stepping", 2990 &error_abort); 2991 object_property_set_str(OBJECT(cpu), model_id, "model-id", 2992 &error_abort); 2993 2994 if (kvm_enabled()) { 2995 env->cpuid_min_level = 2996 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 2997 env->cpuid_min_xlevel = 2998 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 2999 env->cpuid_min_xlevel2 = 3000 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 3001 } else { 3002 env->cpuid_min_level = 3003 hvf_get_supported_cpuid(0x0, 0, R_EAX); 3004 env->cpuid_min_xlevel = 3005 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 3006 env->cpuid_min_xlevel2 = 3007 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 3008 } 3009 3010 if (lmce_supported()) { 3011 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 3012 } 3013 } else { 3014 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 3015 "vendor", &error_abort); 3016 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 3017 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 3018 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 3019 object_property_set_str(OBJECT(cpu), 3020 "QEMU TCG CPU version " QEMU_HW_VERSION, 3021 "model-id", &error_abort); 3022 } 3023 3024 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 3025 } 3026 3027 static const TypeInfo max_x86_cpu_type_info = { 3028 .name = X86_CPU_TYPE_NAME("max"), 3029 
.parent = TYPE_X86_CPU, 3030 .instance_init = max_x86_cpu_initfn, 3031 .class_init = max_x86_cpu_class_init, 3032 }; 3033 3034 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 3035 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 3036 { 3037 X86CPUClass *xcc = X86_CPU_CLASS(oc); 3038 3039 xcc->host_cpuid_required = true; 3040 xcc->ordering = 8; 3041 3042 #if defined(CONFIG_KVM) 3043 xcc->model_description = 3044 "KVM processor with all supported host features "; 3045 #elif defined(CONFIG_HVF) 3046 xcc->model_description = 3047 "HVF processor with all supported host features "; 3048 #endif 3049 } 3050 3051 static const TypeInfo host_x86_cpu_type_info = { 3052 .name = X86_CPU_TYPE_NAME("host"), 3053 .parent = X86_CPU_TYPE_NAME("max"), 3054 .class_init = host_x86_cpu_class_init, 3055 }; 3056 3057 #endif 3058 3059 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 3060 { 3061 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 3062 3063 switch (f->type) { 3064 case CPUID_FEATURE_WORD: 3065 { 3066 const char *reg = get_register_name_32(f->cpuid.reg); 3067 assert(reg); 3068 return g_strdup_printf("CPUID.%02XH:%s", 3069 f->cpuid.eax, reg); 3070 } 3071 case MSR_FEATURE_WORD: 3072 return g_strdup_printf("MSR(%02XH)", 3073 f->msr.index); 3074 } 3075 3076 return NULL; 3077 } 3078 3079 static void report_unavailable_features(FeatureWord w, uint32_t mask) 3080 { 3081 FeatureWordInfo *f = &feature_word_info[w]; 3082 int i; 3083 char *feat_word_str; 3084 3085 for (i = 0; i < 32; ++i) { 3086 if ((1UL << i) & mask) { 3087 feat_word_str = feature_word_description(f, i); 3088 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]", 3089 accel_uses_host_cpuid() ? "host" : "TCG", 3090 feat_word_str, 3091 f->feat_names[i] ? "." : "", 3092 f->feat_names[i] ? 
f->feat_names[i] : "", i); 3093 g_free(feat_word_str); 3094 } 3095 } 3096 } 3097 3098 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 3099 const char *name, void *opaque, 3100 Error **errp) 3101 { 3102 X86CPU *cpu = X86_CPU(obj); 3103 CPUX86State *env = &cpu->env; 3104 int64_t value; 3105 3106 value = (env->cpuid_version >> 8) & 0xf; 3107 if (value == 0xf) { 3108 value += (env->cpuid_version >> 20) & 0xff; 3109 } 3110 visit_type_int(v, name, &value, errp); 3111 } 3112 3113 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 3114 const char *name, void *opaque, 3115 Error **errp) 3116 { 3117 X86CPU *cpu = X86_CPU(obj); 3118 CPUX86State *env = &cpu->env; 3119 const int64_t min = 0; 3120 const int64_t max = 0xff + 0xf; 3121 Error *local_err = NULL; 3122 int64_t value; 3123 3124 visit_type_int(v, name, &value, &local_err); 3125 if (local_err) { 3126 error_propagate(errp, local_err); 3127 return; 3128 } 3129 if (value < min || value > max) { 3130 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3131 name ? 
name : "null", value, min, max); 3132 return; 3133 } 3134 3135 env->cpuid_version &= ~0xff00f00; 3136 if (value > 0x0f) { 3137 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 3138 } else { 3139 env->cpuid_version |= value << 8; 3140 } 3141 } 3142 3143 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 3144 const char *name, void *opaque, 3145 Error **errp) 3146 { 3147 X86CPU *cpu = X86_CPU(obj); 3148 CPUX86State *env = &cpu->env; 3149 int64_t value; 3150 3151 value = (env->cpuid_version >> 4) & 0xf; 3152 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 3153 visit_type_int(v, name, &value, errp); 3154 } 3155 3156 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 3157 const char *name, void *opaque, 3158 Error **errp) 3159 { 3160 X86CPU *cpu = X86_CPU(obj); 3161 CPUX86State *env = &cpu->env; 3162 const int64_t min = 0; 3163 const int64_t max = 0xff; 3164 Error *local_err = NULL; 3165 int64_t value; 3166 3167 visit_type_int(v, name, &value, &local_err); 3168 if (local_err) { 3169 error_propagate(errp, local_err); 3170 return; 3171 } 3172 if (value < min || value > max) { 3173 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3174 name ? 
name : "null", value, min, max); 3175 return; 3176 } 3177 3178 env->cpuid_version &= ~0xf00f0; 3179 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 3180 } 3181 3182 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 3183 const char *name, void *opaque, 3184 Error **errp) 3185 { 3186 X86CPU *cpu = X86_CPU(obj); 3187 CPUX86State *env = &cpu->env; 3188 int64_t value; 3189 3190 value = env->cpuid_version & 0xf; 3191 visit_type_int(v, name, &value, errp); 3192 } 3193 3194 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 3195 const char *name, void *opaque, 3196 Error **errp) 3197 { 3198 X86CPU *cpu = X86_CPU(obj); 3199 CPUX86State *env = &cpu->env; 3200 const int64_t min = 0; 3201 const int64_t max = 0xf; 3202 Error *local_err = NULL; 3203 int64_t value; 3204 3205 visit_type_int(v, name, &value, &local_err); 3206 if (local_err) { 3207 error_propagate(errp, local_err); 3208 return; 3209 } 3210 if (value < min || value > max) { 3211 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3212 name ? 
name : "null", value, min, max); 3213 return; 3214 } 3215 3216 env->cpuid_version &= ~0xf; 3217 env->cpuid_version |= value & 0xf; 3218 } 3219 3220 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 3221 { 3222 X86CPU *cpu = X86_CPU(obj); 3223 CPUX86State *env = &cpu->env; 3224 char *value; 3225 3226 value = g_malloc(CPUID_VENDOR_SZ + 1); 3227 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 3228 env->cpuid_vendor3); 3229 return value; 3230 } 3231 3232 static void x86_cpuid_set_vendor(Object *obj, const char *value, 3233 Error **errp) 3234 { 3235 X86CPU *cpu = X86_CPU(obj); 3236 CPUX86State *env = &cpu->env; 3237 int i; 3238 3239 if (strlen(value) != CPUID_VENDOR_SZ) { 3240 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 3241 return; 3242 } 3243 3244 env->cpuid_vendor1 = 0; 3245 env->cpuid_vendor2 = 0; 3246 env->cpuid_vendor3 = 0; 3247 for (i = 0; i < 4; i++) { 3248 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 3249 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 3250 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 3251 } 3252 } 3253 3254 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 3255 { 3256 X86CPU *cpu = X86_CPU(obj); 3257 CPUX86State *env = &cpu->env; 3258 char *value; 3259 int i; 3260 3261 value = g_malloc(48 + 1); 3262 for (i = 0; i < 48; i++) { 3263 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 3264 } 3265 value[48] = '\0'; 3266 return value; 3267 } 3268 3269 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 3270 Error **errp) 3271 { 3272 X86CPU *cpu = X86_CPU(obj); 3273 CPUX86State *env = &cpu->env; 3274 int c, len, i; 3275 3276 if (model_id == NULL) { 3277 model_id = ""; 3278 } 3279 len = strlen(model_id); 3280 memset(env->cpuid_model, 0, 48); 3281 for (i = 0; i < 48; i++) { 3282 if (i >= len) { 3283 c = '\0'; 3284 } else { 3285 c = (uint8_t)model_id[i]; 3286 } 3287 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 3288 } 3289 } 3290 
/* QOM getter for "tsc-frequency": reported in Hz (stored as kHz) */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "tsc-frequency": accepts Hz, stores kHz in both
 * tsc_khz and user_tsc_khz */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the FeatureWordArray to report */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibily hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper around g_strcmp0 for the feature-name lists */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first -cpu option is parsed into global properties */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* bare "feat" means "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            /* accept suffixed sizes like "2.5G" */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
                                       strList **feat_names)
{
    FeatureWord w;
    strList **next = feat_names;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }
}

/* QOM getter for "unavailable-features": names of all filtered-out bits */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}

/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU to see which features get filtered */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    x86_cpu_list_feature_names(xc->filtered_features, next);

    object_unref(OBJECT(xc));
}

/* Print all cpuid feature names in featureset
 */
static void listflags(GList *features)
{
    size_t len = 0;
    GList *tmp;

    /* wrap the output at roughly 75 columns */
    for (tmp = features; tmp; tmp = tmp->next) {
        const char *name = tmp->data;
        if ((len + strlen(name) + 1) >= 75) {
            qemu_printf("\n");
            len = 0;
        }
        qemu_printf("%s%s", len == 0 ? " " : " ", name);
        len += strlen(name) + 1;
    }
    qemu_printf("\n");
}

/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    char *name_a, *name_b;
    int ret;

    if (cc_a->ordering != cc_b->ordering) {
        ret = cc_a->ordering - cc_b->ordering;
    } else {
        name_a = x86_cpu_class_get_model_name(cc_a);
        name_b = x86_cpu_class_get_model_name(cc_b);
        ret = strcmp(name_a, name_b);
        g_free(name_a);
        g_free(name_b);
    }
    return ret;
}

/* Return every X86CPU subclass sorted by x86_cpu_list_compare() */
static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}

/* Return the model-id string of class @xc (instantiates a temporary
 * object to read the property).  Caller frees the result. */
static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
{
    Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc)));
    char *r = object_property_get_str(obj, "model-id", &error_abort);
    object_unref(obj);
    return r;
}

/* If @cc is an alias model, return the versioned model name it resolves
 * to; otherwise NULL.  Caller frees the result. */
static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
{
    X86CPUVersion version;

    if (!cc->model || !cc->model->is_alias) {
        return NULL;
    }
    version = x86_cpu_model_resolve_version(cc->model);
    if (version <= 0) {
        return NULL;
    }
    return x86_cpu_versioned_model_name(cc->model->cpudef, version);
}

/* g_slist_foreach callback: print one CPU model line for -cpu help */
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    char *name = x86_cpu_class_get_model_name(cc);
    char *desc = g_strdup(cc->model_description);
    char *alias_of = x86_cpu_class_get_alias_of(cc);

    if (!desc && alias_of) {
        if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
            desc = g_strdup("(alias configured by machine type)");
        } else {
            desc = g_strdup_printf("(alias of %s)", alias_of);
        }
    }
    if (!desc) {
        /* fall back to the model-id brand string */
        desc = x86_cpu_class_get_model_id(cc);
    }

    qemu_printf("x86 %-20s  %-48s\n", name, desc);
    g_free(name);
    g_free(desc);
    g_free(alias_of);
}

/* list available CPU models and flags */
void x86_cpu_list(void)
{
    int i, j;
    GSList *list;
    GList *names = NULL;

    qemu_printf("Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, NULL);
    g_slist_free(list);

    names = NULL;
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];
        for (j = 0; j < 32; j++) {
            if (fw->feat_names[j]) {
                names = g_list_append(names, (gpointer)fw->feat_names[j]);
            }
        }
    }

    names = g_list_sort(names, (GCompareFunc)strcmp);

    qemu_printf("\nRecognized CPUID flags:\n");
    listflags(names);
    qemu_printf("\n");
    g_list_free(names);
}

/* g_slist_foreach callback: prepend one CpuDefinitionInfo entry for
 * query-cpu-definitions */
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;
    /*
     * Old machine types won't report aliases, so that alias translation
     * doesn't break compatibility with previous QEMU versions.
     */
    if (default_cpu_version != CPU_VERSION_LEGACY) {
        info->alias_of = x86_cpu_class_get_alias_of(cc);
        info->has_alias_of = !!info->alias_of;
    }

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

/* QMP handler for query-cpu-definitions */
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}

/* Return the bits of feature word @w that the current accelerator can
 * provide; ~0 when no accelerator is active (e.g. qtest).  When
 * @migratable_only is set, unmigratable bits are masked out. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF has no MSR feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}

/* Warn once for every feature bit that was filtered out of the CPU */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

/* Apply a NULL-terminated table of property/value pairs to @cpu.
 * Entries with a NULL value are skipped (cleared defaults). */
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}

/* Apply properties for the CPU model version specified in model */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    /* Property changes are cumulative: apply every version up to and
     * including the requested one */
    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}

/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* userspace APIC cannot do x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t  ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

    x86_cpu_apply_version_props(cpu, model);
}

#ifndef CONFIG_USER_ONLY
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* built once, cached for the lifetime of the process */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}

/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}

/* Apply every entry of @props as a QOM property on @obj, stopping at
 * the first error */
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                         qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}

/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}

/* QMP handler for query-cpu-model-expansion */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                                                      CpuModelInfo *model,
                                                      Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): this second call re-adds the static props already
     * inserted above (qdict_put_obj replaces existing keys), so it looks
     * redundant for the STATIC case — confirm before removing. */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
#endif /* !CONFIG_USER_ONLY */

/* Architecture name reported to the gdb stub */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}

/* Class init for cpudef-based models: attach the X86CPUModel passed as
 * class_data */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUModel *model = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model = model;
    xcc->migration_safe = true;
}

/* Register a QOM type named after @name for CPU model @model */
static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
{
    char *typename = x86_cpu_type_name(name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = model,
    };

    type_register(&ti);
    g_free(typename);
}

static void x86_register_cpudef_types(X86CPUDefinition *def)
{
    X86CPUModel
*m;
    const X86CPUVersionDefinition *vdef;
    char *name;

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    /* Unversioned model: */
    m = g_new0(X86CPUModel, 1);
    m->cpudef = def;
    m->version = CPU_VERSION_AUTO;
    m->is_alias = true;
    x86_register_cpu_model_type(def->name, m);

    /* Versioned models: */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        m->cpudef = def;
        m->version = vdef->version;
        name = x86_cpu_versioned_model_name(def, vdef->version);
        x86_register_cpu_model_type(name, m);
        g_free(name);

        if (vdef->alias) {
            X86CPUModel *am = g_new0(X86CPUModel, 1);
            am->cpudef = def;
            am->version = vdef->version;
            am->is_alias = true;
            x86_register_cpu_model_type(vdef->alias, am);
        }
    }

}

#if !defined(CONFIG_USER_ONLY)

/* Strip the APIC flag from CPUID[1].EDX (used when no APIC is present). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/* Compute the guest-visible CPUID result for leaf @index / subleaf @count,
 * writing the four output registers through @eax..@edx.
 * Out-of-range leaves are clamped per Intel SDM behavior (see below).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;          /* VIA/Centaur range */
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;           /* extended range */
    } else if (index >= 0x40000000) {
        limit = 0x40000001;                  /* hypervisor range */
    } else {
        limit = env->cpuid_level;            /* basic range */
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE at read time */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                               cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE at read time */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable.  */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            /* Leaf only exposed when the die level is meaningful */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                      cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* EDX:EAX = supported XCR0 bits; EBX = size for current xcr0,
             * ECX = size if all supported components were enabled.
             */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogous to Intel leaf 4) */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD Secure Encrypted Virtualization (SEV) info */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset() — put the vCPU into the architectural RESET state. */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields (e.g. configuration) survive reset */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so the reset vector
     * (CS:IP = f000:fff0) lands at the top of the 4G address space.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0,
0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode.  */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU.  */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* True if @cpu is the bootstrap processor (per its APIC BSP bit). */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Enable MCE/MCA banks if the CPU model advertises family >= 6
 * with both CPUID_MCE and CPUID_MCA set; otherwise leave mcg_cap zero.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* mce_banks layout: 4 registers per bank, CTL at offset 0 */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation class matching the current accelerator. */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create the local APIC child object for @cpu (not yet realized). */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* The child property holds the only long-lived reference */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to
link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize the CPU's APIC (if any) and map the APIC MMIO region once
 * for the whole machine (all CPUs share the mapping).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

/* machine-done notifier: alias the machine's SMRAM region into this
 * CPU's address space (above normal RAM, priority 1) if it exists.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* No APIC device in user-mode emulation. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *@min to @value if it is currently lower (monotonic maximum). */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    uint32_t region = eax & 0xF0000000;

    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    if (!env->features[w]) {
        /* No bits set in this word -> its leaf need not be visible */
        return;
    }

    /* Pick the min-level variable matching the word's CPUID range */
    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 *
involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_model() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
*/
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Legacy +feature/-feature command-line flags, applied in order:
     * plus_features first, then minus_features.
     */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    /* Must run after all feature words are final, before level adjustment */
    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
            kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* CPU topology with multi-dies support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5121 */ 5122 static int x86_cpu_filter_features(X86CPU *cpu) 5123 { 5124 CPUX86State *env = &cpu->env; 5125 FeatureWord w; 5126 int rv = 0; 5127 5128 for (w = 0; w < FEATURE_WORDS; w++) { 5129 uint32_t host_feat = 5130 x86_cpu_get_supported_feature_word(w, false); 5131 uint32_t requested_features = env->features[w]; 5132 uint32_t available_features = requested_features & host_feat; 5133 if (!cpu->force_features) { 5134 env->features[w] = available_features; 5135 } 5136 cpu->filtered_features[w] = requested_features & ~available_features; 5137 if (cpu->filtered_features[w]) { 5138 rv = 1; 5139 } 5140 } 5141 5142 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 5143 kvm_enabled()) { 5144 KVMState *s = CPU(cpu)->kvm_state; 5145 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 5146 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 5147 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 5148 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 5149 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 5150 5151 if (!eax_0 || 5152 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 5153 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 5154 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 5155 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 5156 INTEL_PT_ADDR_RANGES_NUM) || 5157 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 5158 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 5159 (ecx_0 & INTEL_PT_IP_LIP)) { 5160 /* 5161 * Processor Trace capabilities aren't configurable, so if the 5162 * host can't emulate the capabilities we report on 5163 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 
5164 */ 5165 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT; 5166 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT; 5167 rv = 1; 5168 } 5169 } 5170 5171 return rv; 5172 } 5173 5174 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 5175 { 5176 CPUState *cs = CPU(dev); 5177 X86CPU *cpu = X86_CPU(dev); 5178 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 5179 CPUX86State *env = &cpu->env; 5180 Error *local_err = NULL; 5181 static bool ht_warned; 5182 5183 if (xcc->host_cpuid_required) { 5184 if (!accel_uses_host_cpuid()) { 5185 char *name = x86_cpu_class_get_model_name(xcc); 5186 error_setg(&local_err, "CPU model '%s' requires KVM", name); 5187 g_free(name); 5188 goto out; 5189 } 5190 5191 if (enable_cpu_pm) { 5192 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 5193 &cpu->mwait.ecx, &cpu->mwait.edx); 5194 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 5195 } 5196 } 5197 5198 /* mwait extended info: needed for Core compatibility */ 5199 /* We always wake on interrupt even if host does not have the capability */ 5200 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 5201 5202 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 5203 error_setg(errp, "apic-id property was not initialized properly"); 5204 return; 5205 } 5206 5207 x86_cpu_expand_features(cpu, &local_err); 5208 if (local_err) { 5209 goto out; 5210 } 5211 5212 if (x86_cpu_filter_features(cpu) && 5213 (cpu->check_cpuid || cpu->enforce_cpuid)) { 5214 x86_cpu_report_filtered_features(cpu); 5215 if (cpu->enforce_cpuid) { 5216 error_setg(&local_err, 5217 accel_uses_host_cpuid() ? 5218 "Host doesn't support requested features" : 5219 "TCG doesn't support requested features"); 5220 goto out; 5221 } 5222 } 5223 5224 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 5225 * CPUID[1].EDX. 
5226 */ 5227 if (IS_AMD_CPU(env)) { 5228 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 5229 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 5230 & CPUID_EXT2_AMD_ALIASES); 5231 } 5232 5233 /* For 64bit systems think about the number of physical bits to present. 5234 * ideally this should be the same as the host; anything other than matching 5235 * the host can cause incorrect guest behaviour. 5236 * QEMU used to pick the magic value of 40 bits that corresponds to 5237 * consumer AMD devices but nothing else. 5238 */ 5239 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5240 if (accel_uses_host_cpuid()) { 5241 uint32_t host_phys_bits = x86_host_phys_bits(); 5242 static bool warned; 5243 5244 /* Print a warning if the user set it to a value that's not the 5245 * host value. 5246 */ 5247 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 5248 !warned) { 5249 warn_report("Host physical bits (%u)" 5250 " does not match phys-bits property (%u)", 5251 host_phys_bits, cpu->phys_bits); 5252 warned = true; 5253 } 5254 5255 if (cpu->host_phys_bits) { 5256 /* The user asked for us to use the host physical bits */ 5257 cpu->phys_bits = host_phys_bits; 5258 if (cpu->host_phys_bits_limit && 5259 cpu->phys_bits > cpu->host_phys_bits_limit) { 5260 cpu->phys_bits = cpu->host_phys_bits_limit; 5261 } 5262 } 5263 5264 if (cpu->phys_bits && 5265 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 5266 cpu->phys_bits < 32)) { 5267 error_setg(errp, "phys-bits should be between 32 and %u " 5268 " (but is %u)", 5269 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 5270 return; 5271 } 5272 } else { 5273 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 5274 error_setg(errp, "TCG only supports phys-bits=%u", 5275 TCG_PHYS_ADDR_BITS); 5276 return; 5277 } 5278 } 5279 /* 0 means it was not explicitly set by the user (or by machine 5280 * compat_props or by the host code above). 
In this case, the default 5281 * is the value used by TCG (40). 5282 */ 5283 if (cpu->phys_bits == 0) { 5284 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 5285 } 5286 } else { 5287 /* For 32 bit systems don't use the user set value, but keep 5288 * phys_bits consistent with what we tell the guest. 5289 */ 5290 if (cpu->phys_bits != 0) { 5291 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 5292 return; 5293 } 5294 5295 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 5296 cpu->phys_bits = 36; 5297 } else { 5298 cpu->phys_bits = 32; 5299 } 5300 } 5301 5302 /* Cache information initialization */ 5303 if (!cpu->legacy_cache) { 5304 if (!xcc->model || !xcc->model->cpudef->cache_info) { 5305 char *name = x86_cpu_class_get_model_name(xcc); 5306 error_setg(errp, 5307 "CPU model '%s' doesn't support legacy-cache=off", name); 5308 g_free(name); 5309 return; 5310 } 5311 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = 5312 *xcc->model->cpudef->cache_info; 5313 } else { 5314 /* Build legacy cache information */ 5315 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; 5316 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; 5317 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; 5318 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; 5319 5320 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; 5321 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; 5322 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; 5323 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; 5324 5325 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; 5326 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; 5327 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; 5328 env->cache_info_amd.l3_cache = &legacy_l3_cache; 5329 } 5330 5331 5332 cpu_exec_realizefn(cs, &local_err); 5333 if (local_err != NULL) { 5334 error_propagate(errp, local_err); 5335 return; 5336 } 5337 5338 #ifndef CONFIG_USER_ONLY 5339 MachineState *ms = MACHINE(qdev_get_machine()); 5340 
qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 5341 5342 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { 5343 x86_cpu_apic_create(cpu, &local_err); 5344 if (local_err != NULL) { 5345 goto out; 5346 } 5347 } 5348 #endif 5349 5350 mce_init(cpu); 5351 5352 #ifndef CONFIG_USER_ONLY 5353 if (tcg_enabled()) { 5354 cpu->cpu_as_mem = g_new(MemoryRegion, 1); 5355 cpu->cpu_as_root = g_new(MemoryRegion, 1); 5356 5357 /* Outer container... */ 5358 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 5359 memory_region_set_enabled(cpu->cpu_as_root, true); 5360 5361 /* ... with two regions inside: normal system memory with low 5362 * priority, and... 5363 */ 5364 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 5365 get_system_memory(), 0, ~0ull); 5366 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 5367 memory_region_set_enabled(cpu->cpu_as_mem, true); 5368 5369 cs->num_ases = 2; 5370 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); 5371 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); 5372 5373 /* ... SMRAM with higher priority, linked from /machine/smram. */ 5374 cpu->machine_done.notify = x86_cpu_machine_done; 5375 qemu_add_machine_init_done_notifier(&cpu->machine_done); 5376 } 5377 #endif 5378 5379 qemu_init_vcpu(cs); 5380 5381 /* 5382 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU 5383 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX 5384 * based on inputs (sockets,cores,threads), it is still better to give 5385 * users a warning. 5386 * 5387 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 5388 * cs->nr_threads hasn't be populated yet and the checking is incorrect. 
5389 */ 5390 if (IS_AMD_CPU(env) && 5391 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && 5392 cs->nr_threads > 1 && !ht_warned) { 5393 warn_report("This family of AMD CPU doesn't support " 5394 "hyperthreading(%d)", 5395 cs->nr_threads); 5396 error_printf("Please configure -smp options properly" 5397 " or try enabling topoext feature.\n"); 5398 ht_warned = true; 5399 } 5400 5401 x86_cpu_apic_realize(cpu, &local_err); 5402 if (local_err != NULL) { 5403 goto out; 5404 } 5405 cpu_reset(cs); 5406 5407 xcc->parent_realize(dev, &local_err); 5408 5409 out: 5410 if (local_err != NULL) { 5411 error_propagate(errp, local_err); 5412 return; 5413 } 5414 } 5415 5416 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp) 5417 { 5418 X86CPU *cpu = X86_CPU(dev); 5419 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 5420 Error *local_err = NULL; 5421 5422 #ifndef CONFIG_USER_ONLY 5423 cpu_remove_sync(CPU(dev)); 5424 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 5425 #endif 5426 5427 if (cpu->apic_state) { 5428 object_unparent(OBJECT(cpu->apic_state)); 5429 cpu->apic_state = NULL; 5430 } 5431 5432 xcc->parent_unrealize(dev, &local_err); 5433 if (local_err != NULL) { 5434 error_propagate(errp, local_err); 5435 return; 5436 } 5437 } 5438 5439 typedef struct BitProperty { 5440 FeatureWord w; 5441 uint32_t mask; 5442 } BitProperty; 5443 5444 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 5445 void *opaque, Error **errp) 5446 { 5447 X86CPU *cpu = X86_CPU(obj); 5448 BitProperty *fp = opaque; 5449 uint32_t f = cpu->env.features[fp->w]; 5450 bool value = (f & fp->mask) == fp->mask; 5451 visit_type_bool(v, name, &value, errp); 5452 } 5453 5454 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 5455 void *opaque, Error **errp) 5456 { 5457 DeviceState *dev = DEVICE(obj); 5458 X86CPU *cpu = X86_CPU(obj); 5459 BitProperty *fp = opaque; 5460 Error *local_err = NULL; 5461 bool value; 5462 5463 if (dev->realized) { 5464 
qdev_prop_set_after_realize(dev, name, errp); 5465 return; 5466 } 5467 5468 visit_type_bool(v, name, &value, &local_err); 5469 if (local_err) { 5470 error_propagate(errp, local_err); 5471 return; 5472 } 5473 5474 if (value) { 5475 cpu->env.features[fp->w] |= fp->mask; 5476 } else { 5477 cpu->env.features[fp->w] &= ~fp->mask; 5478 } 5479 cpu->env.user_features[fp->w] |= fp->mask; 5480 } 5481 5482 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 5483 void *opaque) 5484 { 5485 BitProperty *prop = opaque; 5486 g_free(prop); 5487 } 5488 5489 /* Register a boolean property to get/set a single bit in a uint32_t field. 5490 * 5491 * The same property name can be registered multiple times to make it affect 5492 * multiple bits in the same FeatureWord. In that case, the getter will return 5493 * true only if all bits are set. 5494 */ 5495 static void x86_cpu_register_bit_prop(X86CPU *cpu, 5496 const char *prop_name, 5497 FeatureWord w, 5498 int bitnr) 5499 { 5500 BitProperty *fp; 5501 ObjectProperty *op; 5502 uint32_t mask = (1UL << bitnr); 5503 5504 op = object_property_find(OBJECT(cpu), prop_name, NULL); 5505 if (op) { 5506 fp = op->opaque; 5507 assert(fp->w == w); 5508 fp->mask |= mask; 5509 } else { 5510 fp = g_new0(BitProperty, 1); 5511 fp->w = w; 5512 fp->mask = mask; 5513 object_property_add(OBJECT(cpu), prop_name, "bool", 5514 x86_cpu_get_bit_prop, 5515 x86_cpu_set_bit_prop, 5516 x86_cpu_release_bit_prop, fp, &error_abort); 5517 } 5518 } 5519 5520 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 5521 FeatureWord w, 5522 int bitnr) 5523 { 5524 FeatureWordInfo *fi = &feature_word_info[w]; 5525 const char *name = fi->feat_names[bitnr]; 5526 5527 if (!name) { 5528 return; 5529 } 5530 5531 /* Property names should use "-" instead of "_". 
5532 * Old names containing underscores are registered as aliases 5533 * using object_property_add_alias() 5534 */ 5535 assert(!strchr(name, '_')); 5536 /* aliases don't use "|" delimiters anymore, they are registered 5537 * manually using object_property_add_alias() */ 5538 assert(!strchr(name, '|')); 5539 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 5540 } 5541 5542 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 5543 { 5544 X86CPU *cpu = X86_CPU(cs); 5545 CPUX86State *env = &cpu->env; 5546 GuestPanicInformation *panic_info = NULL; 5547 5548 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 5549 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 5550 5551 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 5552 5553 assert(HV_CRASH_PARAMS >= 5); 5554 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 5555 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 5556 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 5557 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 5558 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 5559 } 5560 5561 return panic_info; 5562 } 5563 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 5564 const char *name, void *opaque, 5565 Error **errp) 5566 { 5567 CPUState *cs = CPU(obj); 5568 GuestPanicInformation *panic_info; 5569 5570 if (!cs->crash_occurred) { 5571 error_setg(errp, "No crash occured"); 5572 return; 5573 } 5574 5575 panic_info = x86_cpu_get_crash_info(cs); 5576 if (panic_info == NULL) { 5577 error_setg(errp, "No crash information"); 5578 return; 5579 } 5580 5581 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 5582 errp); 5583 qapi_free_GuestPanicInformation(panic_info); 5584 } 5585 5586 static void x86_cpu_initfn(Object *obj) 5587 { 5588 X86CPU *cpu = X86_CPU(obj); 5589 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5590 CPUX86State *env = &cpu->env; 5591 FeatureWord w; 5592 5593 env->nr_dies = 1; 5594 
cpu_set_cpustate_pointers(cpu); 5595 5596 object_property_add(obj, "family", "int", 5597 x86_cpuid_version_get_family, 5598 x86_cpuid_version_set_family, NULL, NULL, NULL); 5599 object_property_add(obj, "model", "int", 5600 x86_cpuid_version_get_model, 5601 x86_cpuid_version_set_model, NULL, NULL, NULL); 5602 object_property_add(obj, "stepping", "int", 5603 x86_cpuid_version_get_stepping, 5604 x86_cpuid_version_set_stepping, NULL, NULL, NULL); 5605 object_property_add_str(obj, "vendor", 5606 x86_cpuid_get_vendor, 5607 x86_cpuid_set_vendor, NULL); 5608 object_property_add_str(obj, "model-id", 5609 x86_cpuid_get_model_id, 5610 x86_cpuid_set_model_id, NULL); 5611 object_property_add(obj, "tsc-frequency", "int", 5612 x86_cpuid_get_tsc_freq, 5613 x86_cpuid_set_tsc_freq, NULL, NULL, NULL); 5614 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 5615 x86_cpu_get_feature_words, 5616 NULL, NULL, (void *)env->features, NULL); 5617 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 5618 x86_cpu_get_feature_words, 5619 NULL, NULL, (void *)cpu->filtered_features, NULL); 5620 /* 5621 * The "unavailable-features" property has the same semantics as 5622 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 5623 * QMP command: they list the features that would have prevented the 5624 * CPU from running if the "enforce" flag was set. 
5625 */ 5626 object_property_add(obj, "unavailable-features", "strList", 5627 x86_cpu_get_unavailable_features, 5628 NULL, NULL, NULL, &error_abort); 5629 5630 object_property_add(obj, "crash-information", "GuestPanicInformation", 5631 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL); 5632 5633 for (w = 0; w < FEATURE_WORDS; w++) { 5634 int bitnr; 5635 5636 for (bitnr = 0; bitnr < 32; bitnr++) { 5637 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 5638 } 5639 } 5640 5641 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort); 5642 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort); 5643 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort); 5644 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort); 5645 object_property_add_alias(obj, "xd", obj, "nx", &error_abort); 5646 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort); 5647 object_property_add_alias(obj, "i64", obj, "lm", &error_abort); 5648 5649 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort); 5650 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort); 5651 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort); 5652 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort); 5653 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort); 5654 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort); 5655 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort); 5656 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort); 5657 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort); 5658 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort); 5659 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort); 5660 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", 
&error_abort); 5661 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort); 5662 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort); 5663 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control", 5664 &error_abort); 5665 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort); 5666 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort); 5667 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort); 5668 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort); 5669 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort); 5670 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort); 5671 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort); 5672 5673 if (xcc->model) { 5674 x86_cpu_load_model(cpu, xcc->model, &error_abort); 5675 } 5676 } 5677 5678 static int64_t x86_cpu_get_arch_id(CPUState *cs) 5679 { 5680 X86CPU *cpu = X86_CPU(cs); 5681 5682 return cpu->apic_id; 5683 } 5684 5685 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 5686 { 5687 X86CPU *cpu = X86_CPU(cs); 5688 5689 return cpu->env.cr[0] & CR0_PG_MASK; 5690 } 5691 5692 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 5693 { 5694 X86CPU *cpu = X86_CPU(cs); 5695 5696 cpu->env.eip = value; 5697 } 5698 5699 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 5700 { 5701 X86CPU *cpu = X86_CPU(cs); 5702 5703 cpu->env.eip = tb->pc - tb->cs_base; 5704 } 5705 5706 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 5707 { 5708 X86CPU *cpu = X86_CPU(cs); 5709 CPUX86State *env = &cpu->env; 5710 5711 #if !defined(CONFIG_USER_ONLY) 5712 if (interrupt_request & CPU_INTERRUPT_POLL) { 5713 return CPU_INTERRUPT_POLL; 5714 } 5715 #endif 5716 if (interrupt_request & CPU_INTERRUPT_SIPI) { 5717 return CPU_INTERRUPT_SIPI; 5718 } 5719 5720 if (env->hflags2 & 
HF2_GIF_MASK) { 5721 if ((interrupt_request & CPU_INTERRUPT_SMI) && 5722 !(env->hflags & HF_SMM_MASK)) { 5723 return CPU_INTERRUPT_SMI; 5724 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 5725 !(env->hflags2 & HF2_NMI_MASK)) { 5726 return CPU_INTERRUPT_NMI; 5727 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 5728 return CPU_INTERRUPT_MCE; 5729 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 5730 (((env->hflags2 & HF2_VINTR_MASK) && 5731 (env->hflags2 & HF2_HIF_MASK)) || 5732 (!(env->hflags2 & HF2_VINTR_MASK) && 5733 (env->eflags & IF_MASK && 5734 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 5735 return CPU_INTERRUPT_HARD; 5736 #if !defined(CONFIG_USER_ONLY) 5737 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 5738 (env->eflags & IF_MASK) && 5739 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 5740 return CPU_INTERRUPT_VIRQ; 5741 #endif 5742 } 5743 } 5744 5745 return 0; 5746 } 5747 5748 static bool x86_cpu_has_work(CPUState *cs) 5749 { 5750 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 5751 } 5752 5753 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 5754 { 5755 X86CPU *cpu = X86_CPU(cs); 5756 CPUX86State *env = &cpu->env; 5757 5758 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 5759 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 5760 : bfd_mach_i386_i8086); 5761 info->print_insn = print_insn_i386; 5762 5763 info->cap_arch = CS_ARCH_X86; 5764 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 5765 : env->hflags & HF_CS32_MASK ? 
CS_MODE_32 5766 : CS_MODE_16); 5767 info->cap_insn_unit = 1; 5768 info->cap_insn_split = 8; 5769 } 5770 5771 void x86_update_hflags(CPUX86State *env) 5772 { 5773 uint32_t hflags; 5774 #define HFLAG_COPY_MASK \ 5775 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 5776 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 5777 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 5778 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 5779 5780 hflags = env->hflags & HFLAG_COPY_MASK; 5781 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 5782 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 5783 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 5784 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 5785 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 5786 5787 if (env->cr[4] & CR4_OSFXSR_MASK) { 5788 hflags |= HF_OSFXSR_MASK; 5789 } 5790 5791 if (env->efer & MSR_EFER_LMA) { 5792 hflags |= HF_LMA_MASK; 5793 } 5794 5795 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 5796 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 5797 } else { 5798 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 5799 (DESC_B_SHIFT - HF_CS32_SHIFT); 5800 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 5801 (DESC_B_SHIFT - HF_SS32_SHIFT); 5802 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 5803 !(hflags & HF_CS32_MASK)) { 5804 hflags |= HF_ADDSEG_MASK; 5805 } else { 5806 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 5807 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 5808 } 5809 } 5810 env->hflags = hflags; 5811 } 5812 5813 static Property x86_cpu_properties[] = { 5814 #ifdef CONFIG_USER_ONLY 5815 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 5816 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 5817 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 5818 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 5819 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0), 5820 
DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 5821 #else 5822 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 5823 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 5824 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 5825 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1), 5826 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 5827 #endif 5828 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 5829 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 5830 5831 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, 5832 HYPERV_SPINLOCK_NEVER_RETRY), 5833 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features, 5834 HYPERV_FEAT_RELAXED, 0), 5835 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features, 5836 HYPERV_FEAT_VAPIC, 0), 5837 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features, 5838 HYPERV_FEAT_TIME, 0), 5839 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features, 5840 HYPERV_FEAT_CRASH, 0), 5841 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features, 5842 HYPERV_FEAT_RESET, 0), 5843 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features, 5844 HYPERV_FEAT_VPINDEX, 0), 5845 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features, 5846 HYPERV_FEAT_RUNTIME, 0), 5847 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features, 5848 HYPERV_FEAT_SYNIC, 0), 5849 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features, 5850 HYPERV_FEAT_STIMER, 0), 5851 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features, 5852 HYPERV_FEAT_FREQUENCIES, 0), 5853 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 5854 HYPERV_FEAT_REENLIGHTENMENT, 0), 5855 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 5856 HYPERV_FEAT_TLBFLUSH, 0), 5857 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 5858 HYPERV_FEAT_EVMCS, 0), 5859 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 5860 HYPERV_FEAT_IPI, 0), 5861 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 5862 
HYPERV_FEAT_STIMER_DIRECT, 0), 5863 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 5864 5865 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 5866 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 5867 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 5868 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 5869 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 5870 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 5871 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 5872 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 5873 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 5874 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 5875 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 5876 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 5877 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 5878 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 5879 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 5880 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 5881 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 5882 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 5883 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 5884 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 5885 false), 5886 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 5887 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 5888 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 5889 true), 5890 /* 5891 * lecacy_cache defaults to true unless the CPU model provides its 5892 * own cache information (see x86_cpu_load_def()). 
5893 */ 5894 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 5895 5896 /* 5897 * From "Requirements for Implementing the Microsoft 5898 * Hypervisor Interface": 5899 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 5900 * 5901 * "Starting with Windows Server 2012 and Windows 8, if 5902 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 5903 * the hypervisor imposes no specific limit to the number of VPs. 5904 * In this case, Windows Server 2012 guest VMs may use more than 5905 * 64 VPs, up to the maximum supported number of processors applicable 5906 * to the specific Windows version being used." 5907 */ 5908 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 5909 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 5910 false), 5911 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 5912 true), 5913 DEFINE_PROP_END_OF_LIST() 5914 }; 5915 5916 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 5917 { 5918 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5919 CPUClass *cc = CPU_CLASS(oc); 5920 DeviceClass *dc = DEVICE_CLASS(oc); 5921 5922 device_class_set_parent_realize(dc, x86_cpu_realizefn, 5923 &xcc->parent_realize); 5924 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 5925 &xcc->parent_unrealize); 5926 dc->props = x86_cpu_properties; 5927 5928 xcc->parent_reset = cc->reset; 5929 cc->reset = x86_cpu_reset; 5930 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 5931 5932 cc->class_by_name = x86_cpu_class_by_name; 5933 cc->parse_features = x86_cpu_parse_featurestr; 5934 cc->has_work = x86_cpu_has_work; 5935 #ifdef CONFIG_TCG 5936 cc->do_interrupt = x86_cpu_do_interrupt; 5937 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 5938 #endif 5939 cc->dump_state = x86_cpu_dump_state; 5940 cc->get_crash_info = x86_cpu_get_crash_info; 5941 cc->set_pc = x86_cpu_set_pc; 5942 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 5943 
cc->gdb_read_register = x86_cpu_gdb_read_register; 5944 cc->gdb_write_register = x86_cpu_gdb_write_register; 5945 cc->get_arch_id = x86_cpu_get_arch_id; 5946 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 5947 #ifndef CONFIG_USER_ONLY 5948 cc->asidx_from_attrs = x86_asidx_from_attrs; 5949 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 5950 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; 5951 cc->write_elf64_note = x86_cpu_write_elf64_note; 5952 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 5953 cc->write_elf32_note = x86_cpu_write_elf32_note; 5954 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 5955 cc->vmsd = &vmstate_x86_cpu; 5956 #endif 5957 cc->gdb_arch_name = x86_gdb_arch_name; 5958 #ifdef TARGET_X86_64 5959 cc->gdb_core_xml_file = "i386-64bit.xml"; 5960 cc->gdb_num_core_regs = 66; 5961 #else 5962 cc->gdb_core_xml_file = "i386-32bit.xml"; 5963 cc->gdb_num_core_regs = 50; 5964 #endif 5965 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 5966 cc->debug_excp_handler = breakpoint_handler; 5967 #endif 5968 cc->cpu_exec_enter = x86_cpu_exec_enter; 5969 cc->cpu_exec_exit = x86_cpu_exec_exit; 5970 #ifdef CONFIG_TCG 5971 cc->tcg_initialize = tcg_x86_init; 5972 cc->tlb_fill = x86_cpu_tlb_fill; 5973 #endif 5974 cc->disas_set_info = x86_disas_set_info; 5975 5976 dc->user_creatable = true; 5977 } 5978 5979 static const TypeInfo x86_cpu_type_info = { 5980 .name = TYPE_X86_CPU, 5981 .parent = TYPE_CPU, 5982 .instance_size = sizeof(X86CPU), 5983 .instance_init = x86_cpu_initfn, 5984 .abstract = true, 5985 .class_size = sizeof(X86CPUClass), 5986 .class_init = x86_cpu_common_class_init, 5987 }; 5988 5989 5990 /* "base" CPU model, used by query-cpu-model-expansion */ 5991 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 5992 { 5993 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5994 5995 xcc->static_model = true; 5996 xcc->migration_safe = true; 5997 xcc->model_description = "base CPU model type with no features enabled"; 5998 
xcc->ordering = 8; 5999 } 6000 6001 static const TypeInfo x86_base_cpu_type_info = { 6002 .name = X86_CPU_TYPE_NAME("base"), 6003 .parent = TYPE_X86_CPU, 6004 .class_init = x86_cpu_base_class_init, 6005 }; 6006 6007 static void x86_cpu_register_types(void) 6008 { 6009 int i; 6010 6011 type_register_static(&x86_cpu_type_info); 6012 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 6013 x86_register_cpudef_types(&builtin_x86_defs[i]); 6014 } 6015 type_register_static(&max_x86_cpu_type_info); 6016 type_register_static(&x86_base_cpu_type_info); 6017 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 6018 type_register_static(&host_x86_cpu_type_info); 6019 #endif 6020 } 6021 6022 type_init(x86_cpu_register_types) 6023