1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/reset.h" 30 #include "sysemu/hvf.h" 31 #include "sysemu/cpus.h" 32 #include "kvm_i386.h" 33 #include "sev_i386.h" 34 35 #include "qemu/error-report.h" 36 #include "qemu/module.h" 37 #include "qemu/option.h" 38 #include "qemu/config-file.h" 39 #include "qapi/error.h" 40 #include "qapi/qapi-visit-machine.h" 41 #include "qapi/qapi-visit-run-state.h" 42 #include "qapi/qmp/qdict.h" 43 #include "qapi/qmp/qerror.h" 44 #include "qapi/visitor.h" 45 #include "qom/qom-qobject.h" 46 #include "sysemu/arch_init.h" 47 #include "qapi/qapi-commands-machine-target.h" 48 49 #include "standard-headers/asm-x86/kvm_para.h" 50 51 #include "sysemu/sysemu.h" 52 #include "sysemu/tcg.h" 53 #include "hw/qdev-properties.h" 54 #include "hw/i386/topology.h" 55 #ifndef CONFIG_USER_ONLY 56 #include "exec/address-spaces.h" 57 #include "hw/hw.h" 58 #include "hw/xen/xen.h" 59 #include "hw/i386/apic_internal.h" 60 #include "hw/boards.h" 61 #endif 62 63 #include "disas/capstone.h" 64 65 /* Helpers for 
building CPUID[2] descriptors: */

/* One known CPUID leaf 2 cache descriptor (see table below). */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;   /* data / instruction / unified */
    int level;             /* cache level: 1, 2 or 3 */
    int size;              /* total cache size in bytes */
    int line_size;         /* cache line size in bytes */
    int associativity;     /* number of ways */
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction.
 * The array index is the descriptor byte itself, so the index of a
 * matching entry can be returned directly as the leaf 2 descriptor.
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE,        .size = 8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE,        .size = 24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x29 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE,        .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE,     .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE,     .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE,        .size = 8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE,        .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF

/*
 * Return a CPUID 2 cache descriptor for a given cache.
208 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 209 */ 210 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 211 { 212 int i; 213 214 assert(cache->size > 0); 215 assert(cache->level > 0); 216 assert(cache->line_size > 0); 217 assert(cache->associativity > 0); 218 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 219 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 220 if (d->level == cache->level && d->type == cache->type && 221 d->size == cache->size && d->line_size == cache->line_size && 222 d->associativity == cache->associativity) { 223 return i; 224 } 225 } 226 227 return CACHE_DESCRIPTOR_UNAVAILABLE; 228 } 229 230 /* CPUID Leaf 4 constants: */ 231 232 /* EAX: */ 233 #define CACHE_TYPE_D 1 234 #define CACHE_TYPE_I 2 235 #define CACHE_TYPE_UNIFIED 3 236 237 #define CACHE_LEVEL(l) (l << 5) 238 239 #define CACHE_SELF_INIT_LEVEL (1 << 8) 240 241 /* EDX: */ 242 #define CACHE_NO_INVD_SHARING (1 << 0) 243 #define CACHE_INCLUSIVE (1 << 1) 244 #define CACHE_COMPLEX_IDX (1 << 2) 245 246 /* Encode CacheType for CPUID[4].EAX */ 247 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 248 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 249 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 250 0 /* Invalid value */) 251 252 253 /* Encode cache info for CPUID[4] */ 254 static void encode_cache_cpuid4(CPUCacheInfo *cache, 255 int num_apic_ids, int num_cores, 256 uint32_t *eax, uint32_t *ebx, 257 uint32_t *ecx, uint32_t *edx) 258 { 259 assert(cache->size == cache->line_size * cache->associativity * 260 cache->partitions * cache->sets); 261 262 assert(num_apic_ids > 0); 263 *eax = CACHE_TYPE(cache->type) | 264 CACHE_LEVEL(cache->level) | 265 (cache->self_init ? 
CACHE_SELF_INIT_LEVEL : 0) | 266 ((num_cores - 1) << 26) | 267 ((num_apic_ids - 1) << 14); 268 269 assert(cache->line_size > 0); 270 assert(cache->partitions > 0); 271 assert(cache->associativity > 0); 272 /* We don't implement fully-associative caches */ 273 assert(cache->associativity < cache->sets); 274 *ebx = (cache->line_size - 1) | 275 ((cache->partitions - 1) << 12) | 276 ((cache->associativity - 1) << 22); 277 278 assert(cache->sets > 0); 279 *ecx = cache->sets - 1; 280 281 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 282 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 283 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 284 } 285 286 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 287 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 288 { 289 assert(cache->size % 1024 == 0); 290 assert(cache->lines_per_tag > 0); 291 assert(cache->associativity > 0); 292 assert(cache->line_size > 0); 293 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 294 (cache->lines_per_tag << 8) | (cache->line_size); 295 } 296 297 #define ASSOC_FULL 0xFF 298 299 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 300 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 301 a == 2 ? 0x2 : \ 302 a == 4 ? 0x4 : \ 303 a == 8 ? 0x6 : \ 304 a == 16 ? 0x8 : \ 305 a == 32 ? 0xA : \ 306 a == 48 ? 0xB : \ 307 a == 64 ? 0xC : \ 308 a == 96 ? 0xD : \ 309 a == 128 ? 0xE : \ 310 a == ASSOC_FULL ? 0xF : \ 311 0 /* invalid value */) 312 313 /* 314 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 315 * @l3 can be NULL. 
316 */ 317 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 318 CPUCacheInfo *l3, 319 uint32_t *ecx, uint32_t *edx) 320 { 321 assert(l2->size % 1024 == 0); 322 assert(l2->associativity > 0); 323 assert(l2->lines_per_tag > 0); 324 assert(l2->line_size > 0); 325 *ecx = ((l2->size / 1024) << 16) | 326 (AMD_ENC_ASSOC(l2->associativity) << 12) | 327 (l2->lines_per_tag << 8) | (l2->line_size); 328 329 if (l3) { 330 assert(l3->size % (512 * 1024) == 0); 331 assert(l3->associativity > 0); 332 assert(l3->lines_per_tag > 0); 333 assert(l3->line_size > 0); 334 *edx = ((l3->size / (512 * 1024)) << 18) | 335 (AMD_ENC_ASSOC(l3->associativity) << 12) | 336 (l3->lines_per_tag << 8) | (l3->line_size); 337 } else { 338 *edx = 0; 339 } 340 } 341 342 /* 343 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 344 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 345 * Define the constants to build the cpu topology. Right now, TOPOEXT 346 * feature is enabled only on EPYC. So, these constants are based on 347 * EPYC supported configurations. We may need to handle the cases if 348 * these values change in future. 349 */ 350 /* Maximum core complexes in a node */ 351 #define MAX_CCX 2 352 /* Maximum cores in a core complex */ 353 #define MAX_CORES_IN_CCX 4 354 /* Maximum cores in a node */ 355 #define MAX_CORES_IN_NODE 8 356 /* Maximum nodes in a socket */ 357 #define MAX_NODES_PER_SOCKET 4 358 359 /* 360 * Figure out the number of nodes required to build this config. 361 * Max cores in a node is 8 362 */ 363 static int nodes_in_socket(int nr_cores) 364 { 365 int nodes; 366 367 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); 368 369 /* Hardware does not support config with 3 nodes, return 4 in that case */ 370 return (nodes == 3) ? 
4 : nodes; 371 } 372 373 /* 374 * Decide the number of cores in a core complex with the given nr_cores using 375 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and 376 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible 377 * L3 cache is shared across all cores in a core complex. So, this will also 378 * tell us how many cores are sharing the L3 cache. 379 */ 380 static int cores_in_core_complex(int nr_cores) 381 { 382 int nodes; 383 384 /* Check if we can fit all the cores in one core complex */ 385 if (nr_cores <= MAX_CORES_IN_CCX) { 386 return nr_cores; 387 } 388 /* Get the number of nodes required to build this config */ 389 nodes = nodes_in_socket(nr_cores); 390 391 /* 392 * Divide the cores accros all the core complexes 393 * Return rounded up value 394 */ 395 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); 396 } 397 398 /* Encode cache info for CPUID[8000001D] */ 399 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs, 400 uint32_t *eax, uint32_t *ebx, 401 uint32_t *ecx, uint32_t *edx) 402 { 403 uint32_t l3_cores; 404 assert(cache->size == cache->line_size * cache->associativity * 405 cache->partitions * cache->sets); 406 407 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 408 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 409 410 /* L3 is shared among multiple cores */ 411 if (cache->level == 3) { 412 l3_cores = cores_in_core_complex(cs->nr_cores); 413 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14; 414 } else { 415 *eax |= ((cs->nr_threads - 1) << 14); 416 } 417 418 assert(cache->line_size > 0); 419 assert(cache->partitions > 0); 420 assert(cache->associativity > 0); 421 /* We don't implement fully-associative caches */ 422 assert(cache->associativity < cache->sets); 423 *ebx = (cache->line_size - 1) | 424 ((cache->partitions - 1) << 12) | 425 ((cache->associativity - 1) << 22); 426 427 assert(cache->sets > 0); 428 *ecx = cache->sets - 1; 429 430 *edx = (cache->no_invd_sharing ? 
CACHE_NO_INVD_SHARING : 0) | 431 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 432 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 433 } 434 435 /* Data structure to hold the configuration info for a given core index */ 436 struct core_topology { 437 /* core complex id of the current core index */ 438 int ccx_id; 439 /* 440 * Adjusted core index for this core in the topology 441 * This can be 0,1,2,3 with max 4 cores in a core complex 442 */ 443 int core_id; 444 /* Node id for this core index */ 445 int node_id; 446 /* Number of nodes in this config */ 447 int num_nodes; 448 }; 449 450 /* 451 * Build the configuration closely match the EPYC hardware. Using the EPYC 452 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE) 453 * right now. This could change in future. 454 * nr_cores : Total number of cores in the config 455 * core_id : Core index of the current CPU 456 * topo : Data structure to hold all the config info for this core index 457 */ 458 static void build_core_topology(int nr_cores, int core_id, 459 struct core_topology *topo) 460 { 461 int nodes, cores_in_ccx; 462 463 /* First get the number of nodes required */ 464 nodes = nodes_in_socket(nr_cores); 465 466 cores_in_ccx = cores_in_core_complex(nr_cores); 467 468 topo->node_id = core_id / (cores_in_ccx * MAX_CCX); 469 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx; 470 topo->core_id = core_id % cores_in_ccx; 471 topo->num_nodes = nodes; 472 } 473 474 /* Encode cache info for CPUID[8000001E] */ 475 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu, 476 uint32_t *eax, uint32_t *ebx, 477 uint32_t *ecx, uint32_t *edx) 478 { 479 struct core_topology topo = {0}; 480 unsigned long nodes; 481 int shift; 482 483 build_core_topology(cs->nr_cores, cpu->core_id, &topo); 484 *eax = cpu->apic_id; 485 /* 486 * CPUID_Fn8000001E_EBX 487 * 31:16 Reserved 488 * 15:8 Threads per core (The number of threads per core is 489 * Threads per core + 1) 490 * 7:0 Core id 
(see bit decoding below) 491 * SMT: 492 * 4:3 node id 493 * 2 Core complex id 494 * 1:0 Core id 495 * Non SMT: 496 * 5:4 node id 497 * 3 Core complex id 498 * 1:0 Core id 499 */ 500 if (cs->nr_threads - 1) { 501 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) | 502 (topo.ccx_id << 2) | topo.core_id; 503 } else { 504 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id; 505 } 506 /* 507 * CPUID_Fn8000001E_ECX 508 * 31:11 Reserved 509 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 510 * 7:0 Node id (see bit decoding below) 511 * 2 Socket id 512 * 1:0 Node id 513 */ 514 if (topo.num_nodes <= 4) { 515 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | 516 topo.node_id; 517 } else { 518 /* 519 * Node id fix up. Actual hardware supports up to 4 nodes. But with 520 * more than 32 cores, we may end up with more than 4 nodes. 521 * Node id is a combination of socket id and node id. Only requirement 522 * here is that this number should be unique accross the system. 523 * Shift the socket id to accommodate more nodes. We dont expect both 524 * socket id and node id to be big number at the same time. This is not 525 * an ideal config but we need to to support it. Max nodes we can have 526 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 527 * 5 bits for nodes. Find the left most set bit to represent the total 528 * number of nodes. find_last_bit returns last set bit(0 based). Left 529 * shift(+1) the socket id to represent all the nodes. 530 */ 531 nodes = topo.num_nodes - 1; 532 shift = find_last_bit(&nodes, 8); 533 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) | 534 topo.node_id; 535 } 536 *edx = 0; 537 } 538 539 /* 540 * Definitions of the hardcoded cache entries we expose: 541 * These are legacy cache values. 
If there is a need to change any 542 * of these values please use builtin_x86_defs 543 */ 544 545 /* L1 data cache: */ 546 static CPUCacheInfo legacy_l1d_cache = { 547 .type = DATA_CACHE, 548 .level = 1, 549 .size = 32 * KiB, 550 .self_init = 1, 551 .line_size = 64, 552 .associativity = 8, 553 .sets = 64, 554 .partitions = 1, 555 .no_invd_sharing = true, 556 }; 557 558 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 559 static CPUCacheInfo legacy_l1d_cache_amd = { 560 .type = DATA_CACHE, 561 .level = 1, 562 .size = 64 * KiB, 563 .self_init = 1, 564 .line_size = 64, 565 .associativity = 2, 566 .sets = 512, 567 .partitions = 1, 568 .lines_per_tag = 1, 569 .no_invd_sharing = true, 570 }; 571 572 /* L1 instruction cache: */ 573 static CPUCacheInfo legacy_l1i_cache = { 574 .type = INSTRUCTION_CACHE, 575 .level = 1, 576 .size = 32 * KiB, 577 .self_init = 1, 578 .line_size = 64, 579 .associativity = 8, 580 .sets = 64, 581 .partitions = 1, 582 .no_invd_sharing = true, 583 }; 584 585 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 586 static CPUCacheInfo legacy_l1i_cache_amd = { 587 .type = INSTRUCTION_CACHE, 588 .level = 1, 589 .size = 64 * KiB, 590 .self_init = 1, 591 .line_size = 64, 592 .associativity = 2, 593 .sets = 512, 594 .partitions = 1, 595 .lines_per_tag = 1, 596 .no_invd_sharing = true, 597 }; 598 599 /* Level 2 unified cache: */ 600 static CPUCacheInfo legacy_l2_cache = { 601 .type = UNIFIED_CACHE, 602 .level = 2, 603 .size = 4 * MiB, 604 .self_init = 1, 605 .line_size = 64, 606 .associativity = 16, 607 .sets = 4096, 608 .partitions = 1, 609 .no_invd_sharing = true, 610 }; 611 612 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 613 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 614 .type = UNIFIED_CACHE, 615 .level = 2, 616 .size = 2 * MiB, 617 .line_size = 64, 618 .associativity = 8, 619 }; 620 621 622 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 623 static CPUCacheInfo 
legacy_l2_cache_amd = { 624 .type = UNIFIED_CACHE, 625 .level = 2, 626 .size = 512 * KiB, 627 .line_size = 64, 628 .lines_per_tag = 1, 629 .associativity = 16, 630 .sets = 512, 631 .partitions = 1, 632 }; 633 634 /* Level 3 unified cache: */ 635 static CPUCacheInfo legacy_l3_cache = { 636 .type = UNIFIED_CACHE, 637 .level = 3, 638 .size = 16 * MiB, 639 .line_size = 64, 640 .associativity = 16, 641 .sets = 16384, 642 .partitions = 1, 643 .lines_per_tag = 1, 644 .self_init = true, 645 .inclusive = true, 646 .complex_indexing = true, 647 }; 648 649 /* TLB definitions: */ 650 651 #define L1_DTLB_2M_ASSOC 1 652 #define L1_DTLB_2M_ENTRIES 255 653 #define L1_DTLB_4K_ASSOC 1 654 #define L1_DTLB_4K_ENTRIES 255 655 656 #define L1_ITLB_2M_ASSOC 1 657 #define L1_ITLB_2M_ENTRIES 255 658 #define L1_ITLB_4K_ASSOC 1 659 #define L1_ITLB_4K_ENTRIES 255 660 661 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 662 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 663 #define L2_DTLB_4K_ASSOC 4 664 #define L2_DTLB_4K_ENTRIES 512 665 666 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 667 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 668 #define L2_ITLB_4K_ASSOC 4 669 #define L2_ITLB_4K_ENTRIES 512 670 671 /* CPUID Leaf 0x14 constants: */ 672 #define INTEL_PT_MAX_SUBLEAF 0x1 673 /* 674 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 675 * MSR can be accessed; 676 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 677 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 678 * of Intel PT MSRs across warm reset; 679 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 680 */ 681 #define INTEL_PT_MINIMAL_EBX 0xf 682 /* 683 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 684 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 685 * accessed; 686 * bit[01]: ToPA tables can hold any number of output entries, up to the 687 * maximum allowed by the MaskOrTableOffset field of 688 * 
IA32_RTIT_OUTPUT_MASK_PTRS; 689 * bit[02]: Support Single-Range Output scheme; 690 */ 691 #define INTEL_PT_MINIMAL_ECX 0x7 692 /* generated packets which contain IP payloads have LIP values */ 693 #define INTEL_PT_IP_LIP (1 << 31) 694 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 695 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 696 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 697 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 698 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 699 700 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 701 uint32_t vendor2, uint32_t vendor3) 702 { 703 int i; 704 for (i = 0; i < 4; i++) { 705 dst[i] = vendor1 >> (8 * i); 706 dst[i + 4] = vendor2 >> (8 * i); 707 dst[i + 8] = vendor3 >> (8 * i); 708 } 709 dst[CPUID_VENDOR_SZ] = '\0'; 710 } 711 712 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 713 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 714 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 715 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 716 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 717 CPUID_PSE36 | CPUID_FXSR) 718 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 719 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 720 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 721 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 722 CPUID_PAE | CPUID_SEP | CPUID_APIC) 723 724 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 725 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 726 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 727 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 728 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 729 /* partly implemented: 730 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH 
(needed for Win64) */ 731 /* missing: 732 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 733 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 734 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 735 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 736 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 737 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 738 CPUID_EXT_RDRAND) 739 /* missing: 740 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 741 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 742 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 743 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 744 CPUID_EXT_F16C */ 745 746 #ifdef TARGET_X86_64 747 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 748 #else 749 #define TCG_EXT2_X86_64_FEATURES 0 750 #endif 751 752 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 753 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 754 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 755 TCG_EXT2_X86_64_FEATURES) 756 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 757 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 758 #define TCG_EXT4_FEATURES 0 759 #define TCG_SVM_FEATURES CPUID_SVM_NPT 760 #define TCG_KVM_FEATURES 0 761 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 762 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 763 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 764 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 765 CPUID_7_0_EBX_ERMS) 766 /* missing: 767 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 768 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 769 CPUID_7_0_EBX_RDSEED */ 770 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 771 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 772 CPUID_7_0_ECX_LA57) 773 #define TCG_7_0_EDX_FEATURES 0 774 #define 
TCG_APM_FEATURES 0 775 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 776 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 777 /* missing: 778 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 779 780 typedef enum FeatureWordType { 781 CPUID_FEATURE_WORD, 782 MSR_FEATURE_WORD, 783 } FeatureWordType; 784 785 typedef struct FeatureWordInfo { 786 FeatureWordType type; 787 /* feature flags names are taken from "Intel Processor Identification and 788 * the CPUID Instruction" and AMD's "CPUID Specification". 789 * In cases of disagreement between feature naming conventions, 790 * aliases may be added. 791 */ 792 const char *feat_names[32]; 793 union { 794 /* If type==CPUID_FEATURE_WORD */ 795 struct { 796 uint32_t eax; /* Input EAX for CPUID */ 797 bool needs_ecx; /* CPUID instruction uses ECX as input */ 798 uint32_t ecx; /* Input ECX value for CPUID */ 799 int reg; /* output register (R_* constant) */ 800 } cpuid; 801 /* If type==MSR_FEATURE_WORD */ 802 struct { 803 uint32_t index; 804 struct { /*CPUID that enumerate this MSR*/ 805 FeatureWord cpuid_class; 806 uint32_t cpuid_flag; 807 } cpuid_dep; 808 } msr; 809 }; 810 uint32_t tcg_features; /* Feature flags supported by TCG */ 811 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ 812 uint32_t migratable_flags; /* Feature flags known to be migratable */ 813 /* Features that shouldn't be auto-enabled by "-cpu host" */ 814 uint32_t no_autoenable_flags; 815 } FeatureWordInfo; 816 817 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 818 [FEAT_1_EDX] = { 819 .type = CPUID_FEATURE_WORD, 820 .feat_names = { 821 "fpu", "vme", "de", "pse", 822 "tsc", "msr", "pae", "mce", 823 "cx8", "apic", NULL, "sep", 824 "mtrr", "pge", "mca", "cmov", 825 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 826 NULL, "ds" /* Intel dts */, "acpi", "mmx", 827 "fxsr", "sse", "sse2", "ss", 828 "ht" /* Intel htt */, "tm", "ia64", "pbe", 829 }, 830 .cpuid = {.eax = 1, .reg = R_EDX, }, 831 
.tcg_features = TCG_FEATURES, 832 }, 833 [FEAT_1_ECX] = { 834 .type = CPUID_FEATURE_WORD, 835 .feat_names = { 836 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 837 "ds-cpl", "vmx", "smx", "est", 838 "tm2", "ssse3", "cid", NULL, 839 "fma", "cx16", "xtpr", "pdcm", 840 NULL, "pcid", "dca", "sse4.1", 841 "sse4.2", "x2apic", "movbe", "popcnt", 842 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 843 "avx", "f16c", "rdrand", "hypervisor", 844 }, 845 .cpuid = { .eax = 1, .reg = R_ECX, }, 846 .tcg_features = TCG_EXT_FEATURES, 847 }, 848 /* Feature names that are already defined on feature_name[] but 849 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 850 * names on feat_names below. They are copied automatically 851 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 852 */ 853 [FEAT_8000_0001_EDX] = { 854 .type = CPUID_FEATURE_WORD, 855 .feat_names = { 856 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 857 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 858 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 859 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 860 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 861 "nx", NULL, "mmxext", NULL /* mmx */, 862 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 863 NULL, "lm", "3dnowext", "3dnow", 864 }, 865 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 866 .tcg_features = TCG_EXT2_FEATURES, 867 }, 868 [FEAT_8000_0001_ECX] = { 869 .type = CPUID_FEATURE_WORD, 870 .feat_names = { 871 "lahf-lm", "cmp-legacy", "svm", "extapic", 872 "cr8legacy", "abm", "sse4a", "misalignsse", 873 "3dnowprefetch", "osvw", "ibs", "xop", 874 "skinit", "wdt", NULL, "lwp", 875 "fma4", "tce", NULL, "nodeid-msr", 876 NULL, "tbm", "topoext", "perfctr-core", 877 "perfctr-nb", NULL, NULL, NULL, 878 NULL, NULL, NULL, NULL, 879 }, 880 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 881 .tcg_features = TCG_EXT3_FEATURES, 882 /* 883 * TOPOEXT is always 
allowed but can't be enabled blindly by 884 * "-cpu host", as it requires consistent cache topology info 885 * to be provided so it doesn't confuse guests. 886 */ 887 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 888 }, 889 [FEAT_C000_0001_EDX] = { 890 .type = CPUID_FEATURE_WORD, 891 .feat_names = { 892 NULL, NULL, "xstore", "xstore-en", 893 NULL, NULL, "xcrypt", "xcrypt-en", 894 "ace2", "ace2-en", "phe", "phe-en", 895 "pmm", "pmm-en", NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 NULL, NULL, NULL, NULL, 898 NULL, NULL, NULL, NULL, 899 NULL, NULL, NULL, NULL, 900 }, 901 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 902 .tcg_features = TCG_EXT4_FEATURES, 903 }, 904 [FEAT_KVM] = { 905 .type = CPUID_FEATURE_WORD, 906 .feat_names = { 907 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 908 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 909 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 910 NULL, NULL, NULL, NULL, 911 NULL, NULL, NULL, NULL, 912 NULL, NULL, NULL, NULL, 913 "kvmclock-stable-bit", NULL, NULL, NULL, 914 NULL, NULL, NULL, NULL, 915 }, 916 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 917 .tcg_features = TCG_KVM_FEATURES, 918 }, 919 [FEAT_KVM_HINTS] = { 920 .type = CPUID_FEATURE_WORD, 921 .feat_names = { 922 "kvm-hint-dedicated", NULL, NULL, NULL, 923 NULL, NULL, NULL, NULL, 924 NULL, NULL, NULL, NULL, 925 NULL, NULL, NULL, NULL, 926 NULL, NULL, NULL, NULL, 927 NULL, NULL, NULL, NULL, 928 NULL, NULL, NULL, NULL, 929 NULL, NULL, NULL, NULL, 930 }, 931 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 932 .tcg_features = TCG_KVM_FEATURES, 933 /* 934 * KVM hints aren't auto-enabled by -cpu host, they need to be 935 * explicitly enabled in the command-line. 936 */ 937 .no_autoenable_flags = ~0U, 938 }, 939 /* 940 * .feat_names are commented out for Hyper-V enlightenments because we 941 * don't want to have two different ways for enabling them on QEMU command 942 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) 
require 943 * enabling several feature bits simultaneously, exposing these bits 944 * individually may just confuse guests. 945 */ 946 [FEAT_HYPERV_EAX] = { 947 .type = CPUID_FEATURE_WORD, 948 .feat_names = { 949 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 950 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 951 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 952 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 953 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 954 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 955 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 956 NULL, NULL, 957 NULL, NULL, NULL, NULL, 958 NULL, NULL, NULL, NULL, 959 NULL, NULL, NULL, NULL, 960 NULL, NULL, NULL, NULL, 961 }, 962 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 963 }, 964 [FEAT_HYPERV_EBX] = { 965 .type = CPUID_FEATURE_WORD, 966 .feat_names = { 967 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 968 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 969 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 970 NULL /* hv_create_port */, NULL /* hv_connect_port */, 971 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 972 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 973 NULL, NULL, 974 NULL, NULL, NULL, NULL, 975 NULL, NULL, NULL, NULL, 976 NULL, NULL, NULL, NULL, 977 NULL, NULL, NULL, NULL, 978 }, 979 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 980 }, 981 [FEAT_HYPERV_EDX] = { 982 .type = CPUID_FEATURE_WORD, 983 .feat_names = { 984 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 985 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 986 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 987 NULL, NULL, 988 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 989 NULL, NULL, NULL, NULL, 990 NULL, NULL, NULL, NULL, 991 NULL, NULL, 
NULL, NULL, 992 NULL, NULL, NULL, NULL, 993 NULL, NULL, NULL, NULL, 994 }, 995 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 996 }, 997 [FEAT_HV_RECOMM_EAX] = { 998 .type = CPUID_FEATURE_WORD, 999 .feat_names = { 1000 NULL /* hv_recommend_pv_as_switch */, 1001 NULL /* hv_recommend_pv_tlbflush_local */, 1002 NULL /* hv_recommend_pv_tlbflush_remote */, 1003 NULL /* hv_recommend_msr_apic_access */, 1004 NULL /* hv_recommend_msr_reset */, 1005 NULL /* hv_recommend_relaxed_timing */, 1006 NULL /* hv_recommend_dma_remapping */, 1007 NULL /* hv_recommend_int_remapping */, 1008 NULL /* hv_recommend_x2apic_msrs */, 1009 NULL /* hv_recommend_autoeoi_deprecation */, 1010 NULL /* hv_recommend_pv_ipi */, 1011 NULL /* hv_recommend_ex_hypercalls */, 1012 NULL /* hv_hypervisor_is_nested */, 1013 NULL /* hv_recommend_int_mbec */, 1014 NULL /* hv_recommend_evmcs */, 1015 NULL, 1016 NULL, NULL, NULL, NULL, 1017 NULL, NULL, NULL, NULL, 1018 NULL, NULL, NULL, NULL, 1019 NULL, NULL, NULL, NULL, 1020 }, 1021 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 1022 }, 1023 [FEAT_HV_NESTED_EAX] = { 1024 .type = CPUID_FEATURE_WORD, 1025 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 1026 }, 1027 [FEAT_SVM] = { 1028 .type = CPUID_FEATURE_WORD, 1029 .feat_names = { 1030 "npt", "lbrv", "svm-lock", "nrip-save", 1031 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 1032 NULL, NULL, "pause-filter", NULL, 1033 "pfthreshold", NULL, NULL, NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, NULL, NULL, NULL, 1036 NULL, NULL, NULL, NULL, 1037 NULL, NULL, NULL, NULL, 1038 }, 1039 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 1040 .tcg_features = TCG_SVM_FEATURES, 1041 }, 1042 [FEAT_7_0_EBX] = { 1043 .type = CPUID_FEATURE_WORD, 1044 .feat_names = { 1045 "fsgsbase", "tsc-adjust", NULL, "bmi1", 1046 "hle", "avx2", NULL, "smep", 1047 "bmi2", "erms", "invpcid", "rtm", 1048 NULL, NULL, "mpx", NULL, 1049 "avx512f", "avx512dq", "rdseed", "adx", 1050 "smap", "avx512ifma", "pcommit", "clflushopt", 1051 "clwb", 
"intel-pt", "avx512pf", "avx512er", 1052 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 1053 }, 1054 .cpuid = { 1055 .eax = 7, 1056 .needs_ecx = true, .ecx = 0, 1057 .reg = R_EBX, 1058 }, 1059 .tcg_features = TCG_7_0_EBX_FEATURES, 1060 }, 1061 [FEAT_7_0_ECX] = { 1062 .type = CPUID_FEATURE_WORD, 1063 .feat_names = { 1064 NULL, "avx512vbmi", "umip", "pku", 1065 NULL /* ospke */, NULL, "avx512vbmi2", NULL, 1066 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 1067 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 1068 "la57", NULL, NULL, NULL, 1069 NULL, NULL, "rdpid", NULL, 1070 NULL, "cldemote", NULL, "movdiri", 1071 "movdir64b", NULL, NULL, NULL, 1072 }, 1073 .cpuid = { 1074 .eax = 7, 1075 .needs_ecx = true, .ecx = 0, 1076 .reg = R_ECX, 1077 }, 1078 .tcg_features = TCG_7_0_ECX_FEATURES, 1079 }, 1080 [FEAT_7_0_EDX] = { 1081 .type = CPUID_FEATURE_WORD, 1082 .feat_names = { 1083 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 1084 NULL, NULL, NULL, NULL, 1085 NULL, NULL, "md-clear", NULL, 1086 NULL, NULL, NULL, NULL, 1087 NULL, NULL, NULL /* pconfig */, NULL, 1088 NULL, NULL, NULL, NULL, 1089 NULL, NULL, "spec-ctrl", "stibp", 1090 NULL, "arch-capabilities", "core-capability", "ssbd", 1091 }, 1092 .cpuid = { 1093 .eax = 7, 1094 .needs_ecx = true, .ecx = 0, 1095 .reg = R_EDX, 1096 }, 1097 .tcg_features = TCG_7_0_EDX_FEATURES, 1098 }, 1099 [FEAT_8000_0007_EDX] = { 1100 .type = CPUID_FEATURE_WORD, 1101 .feat_names = { 1102 NULL, NULL, NULL, NULL, 1103 NULL, NULL, NULL, NULL, 1104 "invtsc", NULL, NULL, NULL, 1105 NULL, NULL, NULL, NULL, 1106 NULL, NULL, NULL, NULL, 1107 NULL, NULL, NULL, NULL, 1108 NULL, NULL, NULL, NULL, 1109 NULL, NULL, NULL, NULL, 1110 }, 1111 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1112 .tcg_features = TCG_APM_FEATURES, 1113 .unmigratable_flags = CPUID_APM_INVTSC, 1114 }, 1115 [FEAT_8000_0008_EBX] = { 1116 .type = CPUID_FEATURE_WORD, 1117 .feat_names = { 1118 NULL, NULL, NULL, NULL, 1119 NULL, NULL, NULL, NULL, 1120 NULL, "wbnoinvd", NULL, NULL, 1121 
"ibpb", NULL, NULL, NULL, 1122 NULL, NULL, NULL, NULL, 1123 NULL, NULL, NULL, NULL, 1124 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1125 NULL, NULL, NULL, NULL, 1126 }, 1127 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1128 .tcg_features = 0, 1129 .unmigratable_flags = 0, 1130 }, 1131 [FEAT_XSAVE] = { 1132 .type = CPUID_FEATURE_WORD, 1133 .feat_names = { 1134 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 NULL, NULL, NULL, NULL, 1138 NULL, NULL, NULL, NULL, 1139 NULL, NULL, NULL, NULL, 1140 NULL, NULL, NULL, NULL, 1141 NULL, NULL, NULL, NULL, 1142 }, 1143 .cpuid = { 1144 .eax = 0xd, 1145 .needs_ecx = true, .ecx = 1, 1146 .reg = R_EAX, 1147 }, 1148 .tcg_features = TCG_XSAVE_FEATURES, 1149 }, 1150 [FEAT_6_EAX] = { 1151 .type = CPUID_FEATURE_WORD, 1152 .feat_names = { 1153 NULL, NULL, "arat", NULL, 1154 NULL, NULL, NULL, NULL, 1155 NULL, NULL, NULL, NULL, 1156 NULL, NULL, NULL, NULL, 1157 NULL, NULL, NULL, NULL, 1158 NULL, NULL, NULL, NULL, 1159 NULL, NULL, NULL, NULL, 1160 NULL, NULL, NULL, NULL, 1161 }, 1162 .cpuid = { .eax = 6, .reg = R_EAX, }, 1163 .tcg_features = TCG_6_EAX_FEATURES, 1164 }, 1165 [FEAT_XSAVE_COMP_LO] = { 1166 .type = CPUID_FEATURE_WORD, 1167 .cpuid = { 1168 .eax = 0xD, 1169 .needs_ecx = true, .ecx = 0, 1170 .reg = R_EAX, 1171 }, 1172 .tcg_features = ~0U, 1173 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1174 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1175 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1176 XSTATE_PKRU_MASK, 1177 }, 1178 [FEAT_XSAVE_COMP_HI] = { 1179 .type = CPUID_FEATURE_WORD, 1180 .cpuid = { 1181 .eax = 0xD, 1182 .needs_ecx = true, .ecx = 0, 1183 .reg = R_EDX, 1184 }, 1185 .tcg_features = ~0U, 1186 }, 1187 /*Below are MSR exposed features*/ 1188 [FEAT_ARCH_CAPABILITIES] = { 1189 .type = MSR_FEATURE_WORD, 1190 .feat_names = { 1191 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1192 "ssb-no", "mds-no", NULL, NULL, 
1193 NULL, NULL, NULL, NULL, 1194 NULL, NULL, NULL, NULL, 1195 NULL, NULL, NULL, NULL, 1196 NULL, NULL, NULL, NULL, 1197 NULL, NULL, NULL, NULL, 1198 NULL, NULL, NULL, NULL, 1199 }, 1200 .msr = { 1201 .index = MSR_IA32_ARCH_CAPABILITIES, 1202 .cpuid_dep = { 1203 FEAT_7_0_EDX, 1204 CPUID_7_0_EDX_ARCH_CAPABILITIES 1205 } 1206 }, 1207 }, 1208 [FEAT_CORE_CAPABILITY] = { 1209 .type = MSR_FEATURE_WORD, 1210 .feat_names = { 1211 NULL, NULL, NULL, NULL, 1212 NULL, "split-lock-detect", NULL, NULL, 1213 NULL, NULL, NULL, NULL, 1214 NULL, NULL, NULL, NULL, 1215 NULL, NULL, NULL, NULL, 1216 NULL, NULL, NULL, NULL, 1217 NULL, NULL, NULL, NULL, 1218 NULL, NULL, NULL, NULL, 1219 }, 1220 .msr = { 1221 .index = MSR_IA32_CORE_CAPABILITY, 1222 .cpuid_dep = { 1223 FEAT_7_0_EDX, 1224 CPUID_7_0_EDX_CORE_CAPABILITY, 1225 }, 1226 }, 1227 }, 1228 }; 1229 1230 typedef struct X86RegisterInfo32 { 1231 /* Name of register */ 1232 const char *name; 1233 /* QAPI enum value register */ 1234 X86CPURegister32 qapi_enum; 1235 } X86RegisterInfo32; 1236 1237 #define REGISTER(reg) \ 1238 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1239 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1240 REGISTER(EAX), 1241 REGISTER(ECX), 1242 REGISTER(EDX), 1243 REGISTER(EBX), 1244 REGISTER(ESP), 1245 REGISTER(EBP), 1246 REGISTER(ESI), 1247 REGISTER(EDI), 1248 }; 1249 #undef REGISTER 1250 1251 typedef struct ExtSaveArea { 1252 uint32_t feature, bits; 1253 uint32_t offset, size; 1254 } ExtSaveArea; 1255 1256 static const ExtSaveArea x86_ext_save_areas[] = { 1257 [XSTATE_FP_BIT] = { 1258 /* x87 FP state component is always enabled if XSAVE is supported */ 1259 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1260 /* x87 state is in the legacy region of the XSAVE area */ 1261 .offset = 0, 1262 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1263 }, 1264 [XSTATE_SSE_BIT] = { 1265 /* SSE state component is always enabled if XSAVE is supported */ 1266 .feature = 
FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
            /* SSE state is in the legacy region of the XSAVE area */
            .offset = 0,
            .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
        },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/*
 * Return the size in bytes of the XSAVE area needed to hold the state
 * components selected by @mask: the largest offset + size over all
 * enabled entries of x86_ext_save_areas.  Bits of @mask beyond the
 * table are ignored.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

/* True when the accelerator (KVM or HVF) exposes the host CPUID to guests */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

/*
 * Combine FEAT_XSAVE_COMP_HI/LO (CPUID 0xD ECX=0, EDX:EAX) into a single
 * 64-bit XSAVE component bitmap.
 */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return
((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the name of 32-bit register index @reg, or NULL if out of range */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

/*
 * Execute CPUID on the host with @function in EAX and @count in ECX,
 * storing the result registers through any non-NULL output pointer.
 * Aborts when built for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * pusha/popa restore all GPRs around CPUID — presumably to avoid
     * clobbering EBX, which may be reserved (PIC register) on i386.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

/*
 * Read the host CPU's vendor string (CPUID leaf 0) and family/model/
 * stepping (CPUID leaf 1) via host_cpuid().  @vendor must have room for
 * CPUID_VENDOR_SZ + 1 bytes; @family, @model and @stepping may each be
 * NULL when that value is not wanted.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
if (family) {
        /* base family (bits 11:8) plus extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (bits 7:4) with extended model (bits 19:16) above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        /* stepping is bits 3:0 */
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM class for CPU model @cpu_model; NULL if not registered */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/*
 * Recover the CPU model name by stripping X86_CPU_TYPE_SUFFIX from the
 * class name.  Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* Property name/value pair, used by versioned CPU model definitions */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* One alternative version of a CPU model (see X86CPUDefinition::versions) */
typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version, e.g. "Nehalem-v2"; caller frees */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

/* Return @def's version list, or a default list containing only v1 */
static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    return def->versions ?: default_version_list;
}

/* Cache properties advertised by the EPYC CPU models */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
1540 .associativity = 16, 1541 .partitions = 1, 1542 .sets = 8192, 1543 .lines_per_tag = 1, 1544 .self_init = true, 1545 .inclusive = true, 1546 .complex_indexing = true, 1547 }, 1548 }; 1549 1550 static X86CPUDefinition builtin_x86_defs[] = { 1551 { 1552 .name = "qemu64", 1553 .level = 0xd, 1554 .vendor = CPUID_VENDOR_AMD, 1555 .family = 6, 1556 .model = 6, 1557 .stepping = 3, 1558 .features[FEAT_1_EDX] = 1559 PPRO_FEATURES | 1560 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1561 CPUID_PSE36, 1562 .features[FEAT_1_ECX] = 1563 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1564 .features[FEAT_8000_0001_EDX] = 1565 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1566 .features[FEAT_8000_0001_ECX] = 1567 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1568 .xlevel = 0x8000000A, 1569 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1570 }, 1571 { 1572 .name = "phenom", 1573 .level = 5, 1574 .vendor = CPUID_VENDOR_AMD, 1575 .family = 16, 1576 .model = 2, 1577 .stepping = 3, 1578 /* Missing: CPUID_HT */ 1579 .features[FEAT_1_EDX] = 1580 PPRO_FEATURES | 1581 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1582 CPUID_PSE36 | CPUID_VME, 1583 .features[FEAT_1_ECX] = 1584 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1585 CPUID_EXT_POPCNT, 1586 .features[FEAT_8000_0001_EDX] = 1587 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1588 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1589 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1590 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1591 CPUID_EXT3_CR8LEG, 1592 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1593 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1594 .features[FEAT_8000_0001_ECX] = 1595 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1596 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1597 /* Missing: CPUID_SVM_LBRV */ 1598 .features[FEAT_SVM] = 1599 CPUID_SVM_NPT, 1600 .xlevel = 0x8000001A, 1601 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1602 }, 1603 { 1604 .name = "core2duo", 1605 .level = 10, 1606 .vendor = 
CPUID_VENDOR_INTEL, 1607 .family = 6, 1608 .model = 15, 1609 .stepping = 11, 1610 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1611 .features[FEAT_1_EDX] = 1612 PPRO_FEATURES | 1613 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1614 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1615 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1616 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1617 .features[FEAT_1_ECX] = 1618 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1619 CPUID_EXT_CX16, 1620 .features[FEAT_8000_0001_EDX] = 1621 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1622 .features[FEAT_8000_0001_ECX] = 1623 CPUID_EXT3_LAHF_LM, 1624 .xlevel = 0x80000008, 1625 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1626 }, 1627 { 1628 .name = "kvm64", 1629 .level = 0xd, 1630 .vendor = CPUID_VENDOR_INTEL, 1631 .family = 15, 1632 .model = 6, 1633 .stepping = 1, 1634 /* Missing: CPUID_HT */ 1635 .features[FEAT_1_EDX] = 1636 PPRO_FEATURES | CPUID_VME | 1637 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1638 CPUID_PSE36, 1639 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1640 .features[FEAT_1_ECX] = 1641 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1642 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1643 .features[FEAT_8000_0001_EDX] = 1644 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1645 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1646 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1647 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1648 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1649 .features[FEAT_8000_0001_ECX] = 1650 0, 1651 .xlevel = 0x80000008, 1652 .model_id = "Common KVM processor" 1653 }, 1654 { 1655 .name = "qemu32", 1656 .level = 4, 1657 .vendor = CPUID_VENDOR_INTEL, 1658 .family = 6, 1659 .model = 6, 1660 .stepping = 3, 1661 .features[FEAT_1_EDX] = 1662 PPRO_FEATURES, 1663 .features[FEAT_1_ECX] = 1664 CPUID_EXT_SSE3, 1665 .xlevel = 0x80000004, 1666 
.model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1667 }, 1668 { 1669 .name = "kvm32", 1670 .level = 5, 1671 .vendor = CPUID_VENDOR_INTEL, 1672 .family = 15, 1673 .model = 6, 1674 .stepping = 1, 1675 .features[FEAT_1_EDX] = 1676 PPRO_FEATURES | CPUID_VME | 1677 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1678 .features[FEAT_1_ECX] = 1679 CPUID_EXT_SSE3, 1680 .features[FEAT_8000_0001_ECX] = 1681 0, 1682 .xlevel = 0x80000008, 1683 .model_id = "Common 32-bit KVM processor" 1684 }, 1685 { 1686 .name = "coreduo", 1687 .level = 10, 1688 .vendor = CPUID_VENDOR_INTEL, 1689 .family = 6, 1690 .model = 14, 1691 .stepping = 8, 1692 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1693 .features[FEAT_1_EDX] = 1694 PPRO_FEATURES | CPUID_VME | 1695 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1696 CPUID_SS, 1697 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1698 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1699 .features[FEAT_1_ECX] = 1700 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1701 .features[FEAT_8000_0001_EDX] = 1702 CPUID_EXT2_NX, 1703 .xlevel = 0x80000008, 1704 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1705 }, 1706 { 1707 .name = "486", 1708 .level = 1, 1709 .vendor = CPUID_VENDOR_INTEL, 1710 .family = 4, 1711 .model = 8, 1712 .stepping = 0, 1713 .features[FEAT_1_EDX] = 1714 I486_FEATURES, 1715 .xlevel = 0, 1716 .model_id = "", 1717 }, 1718 { 1719 .name = "pentium", 1720 .level = 1, 1721 .vendor = CPUID_VENDOR_INTEL, 1722 .family = 5, 1723 .model = 4, 1724 .stepping = 3, 1725 .features[FEAT_1_EDX] = 1726 PENTIUM_FEATURES, 1727 .xlevel = 0, 1728 .model_id = "", 1729 }, 1730 { 1731 .name = "pentium2", 1732 .level = 2, 1733 .vendor = CPUID_VENDOR_INTEL, 1734 .family = 6, 1735 .model = 5, 1736 .stepping = 2, 1737 .features[FEAT_1_EDX] = 1738 PENTIUM2_FEATURES, 1739 .xlevel = 0, 1740 .model_id = "", 1741 }, 1742 { 1743 .name = "pentium3", 1744 .level = 3, 1745 .vendor = CPUID_VENDOR_INTEL, 1746 .family = 6, 1747 .model = 7, 1748 .stepping 
= 3, 1749 .features[FEAT_1_EDX] = 1750 PENTIUM3_FEATURES, 1751 .xlevel = 0, 1752 .model_id = "", 1753 }, 1754 { 1755 .name = "athlon", 1756 .level = 2, 1757 .vendor = CPUID_VENDOR_AMD, 1758 .family = 6, 1759 .model = 2, 1760 .stepping = 3, 1761 .features[FEAT_1_EDX] = 1762 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1763 CPUID_MCA, 1764 .features[FEAT_8000_0001_EDX] = 1765 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1766 .xlevel = 0x80000008, 1767 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1768 }, 1769 { 1770 .name = "n270", 1771 .level = 10, 1772 .vendor = CPUID_VENDOR_INTEL, 1773 .family = 6, 1774 .model = 28, 1775 .stepping = 2, 1776 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1777 .features[FEAT_1_EDX] = 1778 PPRO_FEATURES | 1779 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1780 CPUID_ACPI | CPUID_SS, 1781 /* Some CPUs got no CPUID_SEP */ 1782 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1783 * CPUID_EXT_XTPR */ 1784 .features[FEAT_1_ECX] = 1785 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1786 CPUID_EXT_MOVBE, 1787 .features[FEAT_8000_0001_EDX] = 1788 CPUID_EXT2_NX, 1789 .features[FEAT_8000_0001_ECX] = 1790 CPUID_EXT3_LAHF_LM, 1791 .xlevel = 0x80000008, 1792 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1793 }, 1794 { 1795 .name = "Conroe", 1796 .level = 10, 1797 .vendor = CPUID_VENDOR_INTEL, 1798 .family = 6, 1799 .model = 15, 1800 .stepping = 3, 1801 .features[FEAT_1_EDX] = 1802 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1803 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1804 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1805 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1806 CPUID_DE | CPUID_FP87, 1807 .features[FEAT_1_ECX] = 1808 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1809 .features[FEAT_8000_0001_EDX] = 1810 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1811 .features[FEAT_8000_0001_ECX] = 1812 
CPUID_EXT3_LAHF_LM, 1813 .xlevel = 0x80000008, 1814 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1815 }, 1816 { 1817 .name = "Penryn", 1818 .level = 10, 1819 .vendor = CPUID_VENDOR_INTEL, 1820 .family = 6, 1821 .model = 23, 1822 .stepping = 3, 1823 .features[FEAT_1_EDX] = 1824 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1825 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1826 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1827 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1828 CPUID_DE | CPUID_FP87, 1829 .features[FEAT_1_ECX] = 1830 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1831 CPUID_EXT_SSE3, 1832 .features[FEAT_8000_0001_EDX] = 1833 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1834 .features[FEAT_8000_0001_ECX] = 1835 CPUID_EXT3_LAHF_LM, 1836 .xlevel = 0x80000008, 1837 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1838 }, 1839 { 1840 .name = "Nehalem", 1841 .level = 11, 1842 .vendor = CPUID_VENDOR_INTEL, 1843 .family = 6, 1844 .model = 26, 1845 .stepping = 3, 1846 .features[FEAT_1_EDX] = 1847 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1848 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1849 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1850 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1851 CPUID_DE | CPUID_FP87, 1852 .features[FEAT_1_ECX] = 1853 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1854 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1855 .features[FEAT_8000_0001_EDX] = 1856 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1857 .features[FEAT_8000_0001_ECX] = 1858 CPUID_EXT3_LAHF_LM, 1859 .xlevel = 0x80000008, 1860 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1861 .versions = (X86CPUVersionDefinition[]) { 1862 { .version = 1 }, 1863 { 1864 .version = 2, 1865 .alias = "Nehalem-IBRS", 1866 .props = (PropValue[]) { 1867 { "spec-ctrl", "on" }, 1868 
{ "model-id", 1869 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 1870 { /* end of list */ } 1871 } 1872 }, 1873 { /* end of list */ } 1874 } 1875 }, 1876 { 1877 .name = "Westmere", 1878 .level = 11, 1879 .vendor = CPUID_VENDOR_INTEL, 1880 .family = 6, 1881 .model = 44, 1882 .stepping = 1, 1883 .features[FEAT_1_EDX] = 1884 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1885 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1886 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1887 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1888 CPUID_DE | CPUID_FP87, 1889 .features[FEAT_1_ECX] = 1890 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1891 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1892 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1893 .features[FEAT_8000_0001_EDX] = 1894 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1895 .features[FEAT_8000_0001_ECX] = 1896 CPUID_EXT3_LAHF_LM, 1897 .features[FEAT_6_EAX] = 1898 CPUID_6_EAX_ARAT, 1899 .xlevel = 0x80000008, 1900 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1901 .versions = (X86CPUVersionDefinition[]) { 1902 { .version = 1 }, 1903 { 1904 .version = 2, 1905 .alias = "Westmere-IBRS", 1906 .props = (PropValue[]) { 1907 { "spec-ctrl", "on" }, 1908 { "model-id", 1909 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 1910 { /* end of list */ } 1911 } 1912 }, 1913 { /* end of list */ } 1914 } 1915 }, 1916 { 1917 .name = "SandyBridge", 1918 .level = 0xd, 1919 .vendor = CPUID_VENDOR_INTEL, 1920 .family = 6, 1921 .model = 42, 1922 .stepping = 1, 1923 .features[FEAT_1_EDX] = 1924 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1925 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1926 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1927 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1928 CPUID_DE | CPUID_FP87, 1929 .features[FEAT_1_ECX] = 1930 CPUID_EXT_AVX | CPUID_EXT_XSAVE | 
CPUID_EXT_AES | 1931 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1932 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1933 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1934 CPUID_EXT_SSE3, 1935 .features[FEAT_8000_0001_EDX] = 1936 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1937 CPUID_EXT2_SYSCALL, 1938 .features[FEAT_8000_0001_ECX] = 1939 CPUID_EXT3_LAHF_LM, 1940 .features[FEAT_XSAVE] = 1941 CPUID_XSAVE_XSAVEOPT, 1942 .features[FEAT_6_EAX] = 1943 CPUID_6_EAX_ARAT, 1944 .xlevel = 0x80000008, 1945 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1946 .versions = (X86CPUVersionDefinition[]) { 1947 { .version = 1 }, 1948 { 1949 .version = 2, 1950 .alias = "SandyBridge-IBRS", 1951 .props = (PropValue[]) { 1952 { "spec-ctrl", "on" }, 1953 { "model-id", 1954 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 1955 { /* end of list */ } 1956 } 1957 }, 1958 { /* end of list */ } 1959 } 1960 }, 1961 { 1962 .name = "IvyBridge", 1963 .level = 0xd, 1964 .vendor = CPUID_VENDOR_INTEL, 1965 .family = 6, 1966 .model = 58, 1967 .stepping = 9, 1968 .features[FEAT_1_EDX] = 1969 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1970 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1971 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1972 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1973 CPUID_DE | CPUID_FP87, 1974 .features[FEAT_1_ECX] = 1975 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1976 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1977 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1978 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1979 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1980 .features[FEAT_7_0_EBX] = 1981 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1982 CPUID_7_0_EBX_ERMS, 1983 .features[FEAT_8000_0001_EDX] = 1984 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1985 CPUID_EXT2_SYSCALL, 1986 .features[FEAT_8000_0001_ECX] = 1987 
CPUID_EXT3_LAHF_LM, 1988 .features[FEAT_XSAVE] = 1989 CPUID_XSAVE_XSAVEOPT, 1990 .features[FEAT_6_EAX] = 1991 CPUID_6_EAX_ARAT, 1992 .xlevel = 0x80000008, 1993 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1994 .versions = (X86CPUVersionDefinition[]) { 1995 { .version = 1 }, 1996 { 1997 .version = 2, 1998 .alias = "IvyBridge-IBRS", 1999 .props = (PropValue[]) { 2000 { "spec-ctrl", "on" }, 2001 { "model-id", 2002 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2003 { /* end of list */ } 2004 } 2005 }, 2006 { /* end of list */ } 2007 } 2008 }, 2009 { 2010 .name = "Haswell", 2011 .level = 0xd, 2012 .vendor = CPUID_VENDOR_INTEL, 2013 .family = 6, 2014 .model = 60, 2015 .stepping = 4, 2016 .features[FEAT_1_EDX] = 2017 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2018 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2019 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2020 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2021 CPUID_DE | CPUID_FP87, 2022 .features[FEAT_1_ECX] = 2023 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2024 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2025 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2026 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2027 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2028 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2029 .features[FEAT_8000_0001_EDX] = 2030 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2031 CPUID_EXT2_SYSCALL, 2032 .features[FEAT_8000_0001_ECX] = 2033 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2034 .features[FEAT_7_0_EBX] = 2035 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2036 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2037 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2038 CPUID_7_0_EBX_RTM, 2039 .features[FEAT_XSAVE] = 2040 CPUID_XSAVE_XSAVEOPT, 2041 .features[FEAT_6_EAX] = 2042 CPUID_6_EAX_ARAT, 2043 .xlevel = 0x80000008, 2044 .model_id = "Intel Core 
Processor (Haswell)", 2045 .versions = (X86CPUVersionDefinition[]) { 2046 { .version = 1 }, 2047 { 2048 .version = 2, 2049 .alias = "Haswell-noTSX", 2050 .props = (PropValue[]) { 2051 { "hle", "off" }, 2052 { "rtm", "off" }, 2053 { "stepping", "1" }, 2054 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2055 { /* end of list */ } 2056 }, 2057 }, 2058 { 2059 .version = 3, 2060 .alias = "Haswell-IBRS", 2061 .props = (PropValue[]) { 2062 /* Restore TSX features removed by -v2 above */ 2063 { "hle", "on" }, 2064 { "rtm", "on" }, 2065 /* 2066 * Haswell and Haswell-IBRS had stepping=4 in 2067 * QEMU 4.0 and older 2068 */ 2069 { "stepping", "4" }, 2070 { "spec-ctrl", "on" }, 2071 { "model-id", 2072 "Intel Core Processor (Haswell, IBRS)" }, 2073 { /* end of list */ } 2074 } 2075 }, 2076 { 2077 .version = 4, 2078 .alias = "Haswell-noTSX-IBRS", 2079 .props = (PropValue[]) { 2080 { "hle", "off" }, 2081 { "rtm", "off" }, 2082 /* spec-ctrl was already enabled by -v3 above */ 2083 { "stepping", "1" }, 2084 { "model-id", 2085 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2086 { /* end of list */ } 2087 } 2088 }, 2089 { /* end of list */ } 2090 } 2091 }, 2092 { 2093 .name = "Broadwell", 2094 .level = 0xd, 2095 .vendor = CPUID_VENDOR_INTEL, 2096 .family = 6, 2097 .model = 61, 2098 .stepping = 2, 2099 .features[FEAT_1_EDX] = 2100 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2101 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2102 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2103 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2104 CPUID_DE | CPUID_FP87, 2105 .features[FEAT_1_ECX] = 2106 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2107 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2108 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2109 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2111 CPUID_EXT_PCID | CPUID_EXT_F16C | 
CPUID_EXT_RDRAND, 2112 .features[FEAT_8000_0001_EDX] = 2113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2114 CPUID_EXT2_SYSCALL, 2115 .features[FEAT_8000_0001_ECX] = 2116 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2117 .features[FEAT_7_0_EBX] = 2118 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2119 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2120 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2121 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2122 CPUID_7_0_EBX_SMAP, 2123 .features[FEAT_XSAVE] = 2124 CPUID_XSAVE_XSAVEOPT, 2125 .features[FEAT_6_EAX] = 2126 CPUID_6_EAX_ARAT, 2127 .xlevel = 0x80000008, 2128 .model_id = "Intel Core Processor (Broadwell)", 2129 .versions = (X86CPUVersionDefinition[]) { 2130 { .version = 1 }, 2131 { 2132 .version = 2, 2133 .alias = "Broadwell-noTSX", 2134 .props = (PropValue[]) { 2135 { "hle", "off" }, 2136 { "rtm", "off" }, 2137 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2138 { /* end of list */ } 2139 }, 2140 }, 2141 { 2142 .version = 3, 2143 .alias = "Broadwell-IBRS", 2144 .props = (PropValue[]) { 2145 /* Restore TSX features removed by -v2 above */ 2146 { "hle", "on" }, 2147 { "rtm", "on" }, 2148 { "spec-ctrl", "on" }, 2149 { "model-id", 2150 "Intel Core Processor (Broadwell, IBRS)" }, 2151 { /* end of list */ } 2152 } 2153 }, 2154 { 2155 .version = 4, 2156 .alias = "Broadwell-noTSX-IBRS", 2157 .props = (PropValue[]) { 2158 { "hle", "off" }, 2159 { "rtm", "off" }, 2160 /* spec-ctrl was already enabled by -v3 above */ 2161 { "model-id", 2162 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2163 { /* end of list */ } 2164 } 2165 }, 2166 { /* end of list */ } 2167 } 2168 }, 2169 { 2170 .name = "Skylake-Client", 2171 .level = 0xd, 2172 .vendor = CPUID_VENDOR_INTEL, 2173 .family = 6, 2174 .model = 94, 2175 .stepping = 3, 2176 .features[FEAT_1_EDX] = 2177 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2178 CPUID_CLFLUSH | 
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2179 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2180 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2181 CPUID_DE | CPUID_FP87, 2182 .features[FEAT_1_ECX] = 2183 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2184 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2185 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2186 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2187 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2188 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2189 .features[FEAT_8000_0001_EDX] = 2190 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2191 CPUID_EXT2_SYSCALL, 2192 .features[FEAT_8000_0001_ECX] = 2193 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2194 .features[FEAT_7_0_EBX] = 2195 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2196 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2197 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2198 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2199 CPUID_7_0_EBX_SMAP, 2200 /* Missing: XSAVES (not supported by some Linux versions, 2201 * including v4.1 to v4.12). 2202 * KVM doesn't yet expose any XSAVES state save component, 2203 * and the only one defined in Skylake (processor tracing) 2204 * probably will block migration anyway. 
2205 */ 2206 .features[FEAT_XSAVE] = 2207 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2208 CPUID_XSAVE_XGETBV1, 2209 .features[FEAT_6_EAX] = 2210 CPUID_6_EAX_ARAT, 2211 .xlevel = 0x80000008, 2212 .model_id = "Intel Core Processor (Skylake)", 2213 .versions = (X86CPUVersionDefinition[]) { 2214 { .version = 1 }, 2215 { 2216 .version = 2, 2217 .alias = "Skylake-Client-IBRS", 2218 .props = (PropValue[]) { 2219 { "spec-ctrl", "on" }, 2220 { "model-id", 2221 "Intel Core Processor (Skylake, IBRS)" }, 2222 { /* end of list */ } 2223 } 2224 }, 2225 { /* end of list */ } 2226 } 2227 }, 2228 { 2229 .name = "Skylake-Server", 2230 .level = 0xd, 2231 .vendor = CPUID_VENDOR_INTEL, 2232 .family = 6, 2233 .model = 85, 2234 .stepping = 4, 2235 .features[FEAT_1_EDX] = 2236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2240 CPUID_DE | CPUID_FP87, 2241 .features[FEAT_1_ECX] = 2242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2248 .features[FEAT_8000_0001_EDX] = 2249 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2250 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2251 .features[FEAT_8000_0001_ECX] = 2252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2253 .features[FEAT_7_0_EBX] = 2254 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2255 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2256 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2257 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2258 CPUID_7_0_EBX_SMAP | 
CPUID_7_0_EBX_CLWB | 2259 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2260 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2261 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2262 .features[FEAT_7_0_ECX] = 2263 CPUID_7_0_ECX_PKU, 2264 /* Missing: XSAVES (not supported by some Linux versions, 2265 * including v4.1 to v4.12). 2266 * KVM doesn't yet expose any XSAVES state save component, 2267 * and the only one defined in Skylake (processor tracing) 2268 * probably will block migration anyway. 2269 */ 2270 .features[FEAT_XSAVE] = 2271 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2272 CPUID_XSAVE_XGETBV1, 2273 .features[FEAT_6_EAX] = 2274 CPUID_6_EAX_ARAT, 2275 .xlevel = 0x80000008, 2276 .model_id = "Intel Xeon Processor (Skylake)", 2277 .versions = (X86CPUVersionDefinition[]) { 2278 { .version = 1 }, 2279 { 2280 .version = 2, 2281 .alias = "Skylake-Server-IBRS", 2282 .props = (PropValue[]) { 2283 /* clflushopt was not added to Skylake-Server-IBRS */ 2284 /* TODO: add -v3 including clflushopt */ 2285 { "clflushopt", "off" }, 2286 { "spec-ctrl", "on" }, 2287 { "model-id", 2288 "Intel Xeon Processor (Skylake, IBRS)" }, 2289 { /* end of list */ } 2290 } 2291 }, 2292 { /* end of list */ } 2293 } 2294 }, 2295 { 2296 .name = "Cascadelake-Server", 2297 .level = 0xd, 2298 .vendor = CPUID_VENDOR_INTEL, 2299 .family = 6, 2300 .model = 85, 2301 .stepping = 6, 2302 .features[FEAT_1_EDX] = 2303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2307 CPUID_DE | CPUID_FP87, 2308 .features[FEAT_1_ECX] = 2309 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2310 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2311 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2312 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2313 CPUID_EXT_TSC_DEADLINE_TIMER | 
CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2314 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2315 .features[FEAT_8000_0001_EDX] = 2316 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2317 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2318 .features[FEAT_8000_0001_ECX] = 2319 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2320 .features[FEAT_7_0_EBX] = 2321 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2322 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2323 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2324 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2325 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2326 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2327 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2328 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2329 .features[FEAT_7_0_ECX] = 2330 CPUID_7_0_ECX_PKU | 2331 CPUID_7_0_ECX_AVX512VNNI, 2332 .features[FEAT_7_0_EDX] = 2333 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2334 /* Missing: XSAVES (not supported by some Linux versions, 2335 * including v4.1 to v4.12). 2336 * KVM doesn't yet expose any XSAVES state save component, 2337 * and the only one defined in Skylake (processor tracing) 2338 * probably will block migration anyway. 
2339 */ 2340 .features[FEAT_XSAVE] = 2341 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2342 CPUID_XSAVE_XGETBV1, 2343 .features[FEAT_6_EAX] = 2344 CPUID_6_EAX_ARAT, 2345 .xlevel = 0x80000008, 2346 .model_id = "Intel Xeon Processor (Cascadelake)", 2347 .versions = (X86CPUVersionDefinition[]) { 2348 { .version = 1 }, 2349 { .version = 2, 2350 .props = (PropValue[]) { 2351 { "arch-capabilities", "on" }, 2352 { "rdctl-no", "on" }, 2353 { "ibrs-all", "on" }, 2354 { "skip-l1dfl-vmentry", "on" }, 2355 { "mds-no", "on" }, 2356 { /* end of list */ } 2357 }, 2358 }, 2359 { /* end of list */ } 2360 } 2361 }, 2362 { 2363 .name = "Icelake-Client", 2364 .level = 0xd, 2365 .vendor = CPUID_VENDOR_INTEL, 2366 .family = 6, 2367 .model = 126, 2368 .stepping = 0, 2369 .features[FEAT_1_EDX] = 2370 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2371 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2372 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2373 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2374 CPUID_DE | CPUID_FP87, 2375 .features[FEAT_1_ECX] = 2376 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2377 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2378 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2379 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2380 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2381 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2382 .features[FEAT_8000_0001_EDX] = 2383 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2384 CPUID_EXT2_SYSCALL, 2385 .features[FEAT_8000_0001_ECX] = 2386 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2387 .features[FEAT_8000_0008_EBX] = 2388 CPUID_8000_0008_EBX_WBNOINVD, 2389 .features[FEAT_7_0_EBX] = 2390 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2391 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2392 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2393 CPUID_7_0_EBX_RTM | 
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2394 CPUID_7_0_EBX_SMAP, 2395 .features[FEAT_7_0_ECX] = 2396 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2397 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2398 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2399 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2400 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2401 .features[FEAT_7_0_EDX] = 2402 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2403 /* Missing: XSAVES (not supported by some Linux versions, 2404 * including v4.1 to v4.12). 2405 * KVM doesn't yet expose any XSAVES state save component, 2406 * and the only one defined in Skylake (processor tracing) 2407 * probably will block migration anyway. 2408 */ 2409 .features[FEAT_XSAVE] = 2410 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2411 CPUID_XSAVE_XGETBV1, 2412 .features[FEAT_6_EAX] = 2413 CPUID_6_EAX_ARAT, 2414 .xlevel = 0x80000008, 2415 .model_id = "Intel Core Processor (Icelake)", 2416 }, 2417 { 2418 .name = "Icelake-Server", 2419 .level = 0xd, 2420 .vendor = CPUID_VENDOR_INTEL, 2421 .family = 6, 2422 .model = 134, 2423 .stepping = 0, 2424 .features[FEAT_1_EDX] = 2425 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2426 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2427 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2428 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2429 CPUID_DE | CPUID_FP87, 2430 .features[FEAT_1_ECX] = 2431 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2432 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2433 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2434 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2435 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2436 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2437 .features[FEAT_8000_0001_EDX] = 2438 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2439 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2440 .features[FEAT_8000_0001_ECX] 
= 2441 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2442 .features[FEAT_8000_0008_EBX] = 2443 CPUID_8000_0008_EBX_WBNOINVD, 2444 .features[FEAT_7_0_EBX] = 2445 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2446 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2447 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2448 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2449 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2450 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2451 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2452 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2453 .features[FEAT_7_0_ECX] = 2454 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2455 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2456 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2457 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2458 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 2459 .features[FEAT_7_0_EDX] = 2460 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2461 /* Missing: XSAVES (not supported by some Linux versions, 2462 * including v4.1 to v4.12). 2463 * KVM doesn't yet expose any XSAVES state save component, 2464 * and the only one defined in Skylake (processor tracing) 2465 * probably will block migration anyway. 
2466 */ 2467 .features[FEAT_XSAVE] = 2468 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2469 CPUID_XSAVE_XGETBV1, 2470 .features[FEAT_6_EAX] = 2471 CPUID_6_EAX_ARAT, 2472 .xlevel = 0x80000008, 2473 .model_id = "Intel Xeon Processor (Icelake)", 2474 }, 2475 { 2476 .name = "Snowridge", 2477 .level = 27, 2478 .vendor = CPUID_VENDOR_INTEL, 2479 .family = 6, 2480 .model = 134, 2481 .stepping = 1, 2482 .features[FEAT_1_EDX] = 2483 /* missing: CPUID_PN CPUID_IA64 */ 2484 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2485 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 2486 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 2487 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 2488 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 2489 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 2490 CPUID_MMX | 2491 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 2492 .features[FEAT_1_ECX] = 2493 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 2494 CPUID_EXT_SSSE3 | 2495 CPUID_EXT_CX16 | 2496 CPUID_EXT_SSE41 | 2497 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 2498 CPUID_EXT_POPCNT | 2499 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 2500 CPUID_EXT_RDRAND, 2501 .features[FEAT_8000_0001_EDX] = 2502 CPUID_EXT2_SYSCALL | 2503 CPUID_EXT2_NX | 2504 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2505 CPUID_EXT2_LM, 2506 .features[FEAT_8000_0001_ECX] = 2507 CPUID_EXT3_LAHF_LM | 2508 CPUID_EXT3_3DNOWPREFETCH, 2509 .features[FEAT_7_0_EBX] = 2510 CPUID_7_0_EBX_FSGSBASE | 2511 CPUID_7_0_EBX_SMEP | 2512 CPUID_7_0_EBX_ERMS | 2513 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 2514 CPUID_7_0_EBX_RDSEED | 2515 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2516 CPUID_7_0_EBX_CLWB | 2517 CPUID_7_0_EBX_SHA_NI, 2518 .features[FEAT_7_0_ECX] = 2519 CPUID_7_0_ECX_UMIP | 2520 /* missing bit 5 */ 2521 CPUID_7_0_ECX_GFNI | 2522 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 2523 CPUID_7_0_ECX_MOVDIR64B, 2524 .features[FEAT_7_0_EDX] = 2525 CPUID_7_0_EDX_SPEC_CTRL | 2526 
CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 2527 CPUID_7_0_EDX_CORE_CAPABILITY, 2528 .features[FEAT_CORE_CAPABILITY] = 2529 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 2530 /* 2531 * Missing: XSAVES (not supported by some Linux versions, 2532 * including v4.1 to v4.12). 2533 * KVM doesn't yet expose any XSAVES state save component, 2534 * and the only one defined in Skylake (processor tracing) 2535 * probably will block migration anyway. 2536 */ 2537 .features[FEAT_XSAVE] = 2538 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2539 CPUID_XSAVE_XGETBV1, 2540 .features[FEAT_6_EAX] = 2541 CPUID_6_EAX_ARAT, 2542 .xlevel = 0x80000008, 2543 .model_id = "Intel Atom Processor (SnowRidge)", 2544 }, 2545 { 2546 .name = "KnightsMill", 2547 .level = 0xd, 2548 .vendor = CPUID_VENDOR_INTEL, 2549 .family = 6, 2550 .model = 133, 2551 .stepping = 0, 2552 .features[FEAT_1_EDX] = 2553 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2554 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2555 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2556 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2557 CPUID_PSE | CPUID_DE | CPUID_FP87, 2558 .features[FEAT_1_ECX] = 2559 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2560 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2561 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2562 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2563 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2564 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2565 .features[FEAT_8000_0001_EDX] = 2566 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2567 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2568 .features[FEAT_8000_0001_ECX] = 2569 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2570 .features[FEAT_7_0_EBX] = 2571 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2572 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2573 CPUID_7_0_EBX_RDSEED | 
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2574 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2575 CPUID_7_0_EBX_AVX512ER, 2576 .features[FEAT_7_0_ECX] = 2577 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2578 .features[FEAT_7_0_EDX] = 2579 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2580 .features[FEAT_XSAVE] = 2581 CPUID_XSAVE_XSAVEOPT, 2582 .features[FEAT_6_EAX] = 2583 CPUID_6_EAX_ARAT, 2584 .xlevel = 0x80000008, 2585 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2586 }, 2587 { 2588 .name = "Opteron_G1", 2589 .level = 5, 2590 .vendor = CPUID_VENDOR_AMD, 2591 .family = 15, 2592 .model = 6, 2593 .stepping = 1, 2594 .features[FEAT_1_EDX] = 2595 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2596 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2597 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2598 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2599 CPUID_DE | CPUID_FP87, 2600 .features[FEAT_1_ECX] = 2601 CPUID_EXT_SSE3, 2602 .features[FEAT_8000_0001_EDX] = 2603 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2604 .xlevel = 0x80000008, 2605 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2606 }, 2607 { 2608 .name = "Opteron_G2", 2609 .level = 5, 2610 .vendor = CPUID_VENDOR_AMD, 2611 .family = 15, 2612 .model = 6, 2613 .stepping = 1, 2614 .features[FEAT_1_EDX] = 2615 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2616 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2617 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2618 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2619 CPUID_DE | CPUID_FP87, 2620 .features[FEAT_1_ECX] = 2621 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2622 .features[FEAT_8000_0001_EDX] = 2623 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2624 .features[FEAT_8000_0001_ECX] = 2625 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2626 .xlevel = 0x80000008, 2627 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2628 
}, 2629 { 2630 .name = "Opteron_G3", 2631 .level = 5, 2632 .vendor = CPUID_VENDOR_AMD, 2633 .family = 16, 2634 .model = 2, 2635 .stepping = 3, 2636 .features[FEAT_1_EDX] = 2637 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2638 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2639 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2640 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2641 CPUID_DE | CPUID_FP87, 2642 .features[FEAT_1_ECX] = 2643 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2644 CPUID_EXT_SSE3, 2645 .features[FEAT_8000_0001_EDX] = 2646 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 2647 CPUID_EXT2_RDTSCP, 2648 .features[FEAT_8000_0001_ECX] = 2649 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2650 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2651 .xlevel = 0x80000008, 2652 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2653 }, 2654 { 2655 .name = "Opteron_G4", 2656 .level = 0xd, 2657 .vendor = CPUID_VENDOR_AMD, 2658 .family = 21, 2659 .model = 1, 2660 .stepping = 2, 2661 .features[FEAT_1_EDX] = 2662 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2663 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2664 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2665 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2666 CPUID_DE | CPUID_FP87, 2667 .features[FEAT_1_ECX] = 2668 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2669 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2670 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2671 CPUID_EXT_SSE3, 2672 .features[FEAT_8000_0001_EDX] = 2673 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2674 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2675 .features[FEAT_8000_0001_ECX] = 2676 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2677 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2678 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2679 CPUID_EXT3_LAHF_LM, 2680 
.features[FEAT_SVM] = 2681 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2682 /* no xsaveopt! */ 2683 .xlevel = 0x8000001A, 2684 .model_id = "AMD Opteron 62xx class CPU", 2685 }, 2686 { 2687 .name = "Opteron_G5", 2688 .level = 0xd, 2689 .vendor = CPUID_VENDOR_AMD, 2690 .family = 21, 2691 .model = 2, 2692 .stepping = 0, 2693 .features[FEAT_1_EDX] = 2694 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2695 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2696 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2697 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2698 CPUID_DE | CPUID_FP87, 2699 .features[FEAT_1_ECX] = 2700 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2701 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2702 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2703 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2704 .features[FEAT_8000_0001_EDX] = 2705 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2706 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2707 .features[FEAT_8000_0001_ECX] = 2708 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2709 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2710 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2711 CPUID_EXT3_LAHF_LM, 2712 .features[FEAT_SVM] = 2713 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2714 /* no xsaveopt! 
*/ 2715 .xlevel = 0x8000001A, 2716 .model_id = "AMD Opteron 63xx class CPU", 2717 }, 2718 { 2719 .name = "EPYC", 2720 .level = 0xd, 2721 .vendor = CPUID_VENDOR_AMD, 2722 .family = 23, 2723 .model = 1, 2724 .stepping = 2, 2725 .features[FEAT_1_EDX] = 2726 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2727 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2728 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2729 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2730 CPUID_VME | CPUID_FP87, 2731 .features[FEAT_1_ECX] = 2732 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2733 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2734 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2735 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2736 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2737 .features[FEAT_8000_0001_EDX] = 2738 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2739 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2740 CPUID_EXT2_SYSCALL, 2741 .features[FEAT_8000_0001_ECX] = 2742 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2743 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2744 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2745 CPUID_EXT3_TOPOEXT, 2746 .features[FEAT_7_0_EBX] = 2747 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2748 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2749 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2750 CPUID_7_0_EBX_SHA_NI, 2751 /* Missing: XSAVES (not supported by some Linux versions, 2752 * including v4.1 to v4.12). 2753 * KVM doesn't yet expose any XSAVES state save component. 
2754 */ 2755 .features[FEAT_XSAVE] = 2756 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2757 CPUID_XSAVE_XGETBV1, 2758 .features[FEAT_6_EAX] = 2759 CPUID_6_EAX_ARAT, 2760 .features[FEAT_SVM] = 2761 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2762 .xlevel = 0x8000001E, 2763 .model_id = "AMD EPYC Processor", 2764 .cache_info = &epyc_cache_info, 2765 .versions = (X86CPUVersionDefinition[]) { 2766 { .version = 1 }, 2767 { 2768 .version = 2, 2769 .alias = "EPYC-IBPB", 2770 .props = (PropValue[]) { 2771 { "ibpb", "on" }, 2772 { "model-id", 2773 "AMD EPYC Processor (with IBPB)" }, 2774 { /* end of list */ } 2775 } 2776 }, 2777 { /* end of list */ } 2778 } 2779 }, 2780 { 2781 .name = "Dhyana", 2782 .level = 0xd, 2783 .vendor = CPUID_VENDOR_HYGON, 2784 .family = 24, 2785 .model = 0, 2786 .stepping = 1, 2787 .features[FEAT_1_EDX] = 2788 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2789 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2790 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2791 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2792 CPUID_VME | CPUID_FP87, 2793 .features[FEAT_1_ECX] = 2794 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2795 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 2796 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2797 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2798 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 2799 .features[FEAT_8000_0001_EDX] = 2800 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2801 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2802 CPUID_EXT2_SYSCALL, 2803 .features[FEAT_8000_0001_ECX] = 2804 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2805 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2806 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2807 CPUID_EXT3_TOPOEXT, 2808 .features[FEAT_8000_0008_EBX] = 2809 CPUID_8000_0008_EBX_IBPB, 2810 .features[FEAT_7_0_EBX] = 2811 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 
CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .features[FEAT_SVM] =
            CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
        .xlevel = 0x8000001E,
        .model_id = "Hygon Dhyana Processor",
        /* Dhyana reuses the EPYC cache hierarchy description */
        .cache_info = &epyc_cache_info,
    },
};

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },     /* end of list */
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },     /* end of list */
};


/*
 * Version used for CPU models whose version is CPU_VERSION_AUTO; see
 * x86_cpu_model_resolve_version() below.
 */
X86CPUVersion default_cpu_version = CPU_VERSION_LATEST;

void x86_cpu_set_default_version(X86CPUVersion version)
{
    /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
    assert(version != CPU_VERSION_AUTO);
    default_cpu_version = version;
}

/*
 * Return the last version number in the model's version list,
 * or 0 if the model defines no versions.
 */
static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
{
    int v = 0;
    const X86CPUVersionDefinition *vdef =
        x86_cpu_def_get_versions(model->cpudef);
    while (vdef->version) {
        v = vdef->version;
        vdef++;
    }
    return v;
}

/* Return the actual version
being used for a specific CPU model */ 2879 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 2880 { 2881 X86CPUVersion v = model->version; 2882 if (v == CPU_VERSION_AUTO) { 2883 v = default_cpu_version; 2884 } 2885 if (v == CPU_VERSION_LATEST) { 2886 return x86_cpu_model_last_version(model); 2887 } 2888 return v; 2889 } 2890 2891 void x86_cpu_change_kvm_default(const char *prop, const char *value) 2892 { 2893 PropValue *pv; 2894 for (pv = kvm_default_props; pv->prop; pv++) { 2895 if (!strcmp(pv->prop, prop)) { 2896 pv->value = value; 2897 break; 2898 } 2899 } 2900 2901 /* It is valid to call this function only for properties that 2902 * are already present in the kvm_default_props table. 2903 */ 2904 assert(pv->prop); 2905 } 2906 2907 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 2908 bool migratable_only); 2909 2910 static bool lmce_supported(void) 2911 { 2912 uint64_t mce_cap = 0; 2913 2914 #ifdef CONFIG_KVM 2915 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 2916 return false; 2917 } 2918 #endif 2919 2920 return !!(mce_cap & MCG_LMCE_P); 2921 } 2922 2923 #define CPUID_MODEL_ID_SZ 48 2924 2925 /** 2926 * cpu_x86_fill_model_id: 2927 * Get CPUID model ID string from host CPU. 2928 * 2929 * @str should have at least CPUID_MODEL_ID_SZ bytes 2930 * 2931 * The function does NOT add a null terminator to the string 2932 * automatically. 
2933 */ 2934 static int cpu_x86_fill_model_id(char *str) 2935 { 2936 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 2937 int i; 2938 2939 for (i = 0; i < 3; i++) { 2940 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 2941 memcpy(str + i * 16 + 0, &eax, 4); 2942 memcpy(str + i * 16 + 4, &ebx, 4); 2943 memcpy(str + i * 16 + 8, &ecx, 4); 2944 memcpy(str + i * 16 + 12, &edx, 4); 2945 } 2946 return 0; 2947 } 2948 2949 static Property max_x86_cpu_properties[] = { 2950 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 2951 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 2952 DEFINE_PROP_END_OF_LIST() 2953 }; 2954 2955 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 2956 { 2957 DeviceClass *dc = DEVICE_CLASS(oc); 2958 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2959 2960 xcc->ordering = 9; 2961 2962 xcc->model_description = 2963 "Enables all features supported by the accelerator in the current host"; 2964 2965 dc->props = max_x86_cpu_properties; 2966 } 2967 2968 static void max_x86_cpu_initfn(Object *obj) 2969 { 2970 X86CPU *cpu = X86_CPU(obj); 2971 CPUX86State *env = &cpu->env; 2972 KVMState *s = kvm_state; 2973 2974 /* We can't fill the features array here because we don't know yet if 2975 * "migratable" is true or false. 
2976 */ 2977 cpu->max_features = true; 2978 2979 if (accel_uses_host_cpuid()) { 2980 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 2981 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 2982 int family, model, stepping; 2983 2984 host_vendor_fms(vendor, &family, &model, &stepping); 2985 cpu_x86_fill_model_id(model_id); 2986 2987 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 2988 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 2989 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 2990 object_property_set_int(OBJECT(cpu), stepping, "stepping", 2991 &error_abort); 2992 object_property_set_str(OBJECT(cpu), model_id, "model-id", 2993 &error_abort); 2994 2995 if (kvm_enabled()) { 2996 env->cpuid_min_level = 2997 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 2998 env->cpuid_min_xlevel = 2999 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 3000 env->cpuid_min_xlevel2 = 3001 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 3002 } else { 3003 env->cpuid_min_level = 3004 hvf_get_supported_cpuid(0x0, 0, R_EAX); 3005 env->cpuid_min_xlevel = 3006 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 3007 env->cpuid_min_xlevel2 = 3008 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 3009 } 3010 3011 if (lmce_supported()) { 3012 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 3013 } 3014 } else { 3015 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 3016 "vendor", &error_abort); 3017 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 3018 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 3019 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 3020 object_property_set_str(OBJECT(cpu), 3021 "QEMU TCG CPU version " QEMU_HW_VERSION, 3022 "model-id", &error_abort); 3023 } 3024 3025 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 3026 } 3027 3028 static const TypeInfo max_x86_cpu_type_info = { 3029 .name = X86_CPU_TYPE_NAME("max"), 3030 
.parent = TYPE_X86_CPU, 3031 .instance_init = max_x86_cpu_initfn, 3032 .class_init = max_x86_cpu_class_init, 3033 }; 3034 3035 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 3036 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 3037 { 3038 X86CPUClass *xcc = X86_CPU_CLASS(oc); 3039 3040 xcc->host_cpuid_required = true; 3041 xcc->ordering = 8; 3042 3043 #if defined(CONFIG_KVM) 3044 xcc->model_description = 3045 "KVM processor with all supported host features "; 3046 #elif defined(CONFIG_HVF) 3047 xcc->model_description = 3048 "HVF processor with all supported host features "; 3049 #endif 3050 } 3051 3052 static const TypeInfo host_x86_cpu_type_info = { 3053 .name = X86_CPU_TYPE_NAME("host"), 3054 .parent = X86_CPU_TYPE_NAME("max"), 3055 .class_init = host_x86_cpu_class_init, 3056 }; 3057 3058 #endif 3059 3060 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 3061 { 3062 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 3063 3064 switch (f->type) { 3065 case CPUID_FEATURE_WORD: 3066 { 3067 const char *reg = get_register_name_32(f->cpuid.reg); 3068 assert(reg); 3069 return g_strdup_printf("CPUID.%02XH:%s", 3070 f->cpuid.eax, reg); 3071 } 3072 case MSR_FEATURE_WORD: 3073 return g_strdup_printf("MSR(%02XH)", 3074 f->msr.index); 3075 } 3076 3077 return NULL; 3078 } 3079 3080 static void report_unavailable_features(FeatureWord w, uint32_t mask) 3081 { 3082 FeatureWordInfo *f = &feature_word_info[w]; 3083 int i; 3084 char *feat_word_str; 3085 3086 for (i = 0; i < 32; ++i) { 3087 if ((1UL << i) & mask) { 3088 feat_word_str = feature_word_description(f, i); 3089 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]", 3090 accel_uses_host_cpuid() ? "host" : "TCG", 3091 feat_word_str, 3092 f->feat_names[i] ? "." : "", 3093 f->feat_names[i] ? 
f->feat_names[i] : "", i); 3094 g_free(feat_word_str); 3095 } 3096 } 3097 } 3098 3099 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 3100 const char *name, void *opaque, 3101 Error **errp) 3102 { 3103 X86CPU *cpu = X86_CPU(obj); 3104 CPUX86State *env = &cpu->env; 3105 int64_t value; 3106 3107 value = (env->cpuid_version >> 8) & 0xf; 3108 if (value == 0xf) { 3109 value += (env->cpuid_version >> 20) & 0xff; 3110 } 3111 visit_type_int(v, name, &value, errp); 3112 } 3113 3114 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 3115 const char *name, void *opaque, 3116 Error **errp) 3117 { 3118 X86CPU *cpu = X86_CPU(obj); 3119 CPUX86State *env = &cpu->env; 3120 const int64_t min = 0; 3121 const int64_t max = 0xff + 0xf; 3122 Error *local_err = NULL; 3123 int64_t value; 3124 3125 visit_type_int(v, name, &value, &local_err); 3126 if (local_err) { 3127 error_propagate(errp, local_err); 3128 return; 3129 } 3130 if (value < min || value > max) { 3131 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3132 name ? 
name : "null", value, min, max); 3133 return; 3134 } 3135 3136 env->cpuid_version &= ~0xff00f00; 3137 if (value > 0x0f) { 3138 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 3139 } else { 3140 env->cpuid_version |= value << 8; 3141 } 3142 } 3143 3144 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 3145 const char *name, void *opaque, 3146 Error **errp) 3147 { 3148 X86CPU *cpu = X86_CPU(obj); 3149 CPUX86State *env = &cpu->env; 3150 int64_t value; 3151 3152 value = (env->cpuid_version >> 4) & 0xf; 3153 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 3154 visit_type_int(v, name, &value, errp); 3155 } 3156 3157 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 3158 const char *name, void *opaque, 3159 Error **errp) 3160 { 3161 X86CPU *cpu = X86_CPU(obj); 3162 CPUX86State *env = &cpu->env; 3163 const int64_t min = 0; 3164 const int64_t max = 0xff; 3165 Error *local_err = NULL; 3166 int64_t value; 3167 3168 visit_type_int(v, name, &value, &local_err); 3169 if (local_err) { 3170 error_propagate(errp, local_err); 3171 return; 3172 } 3173 if (value < min || value > max) { 3174 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3175 name ? 
name : "null", value, min, max); 3176 return; 3177 } 3178 3179 env->cpuid_version &= ~0xf00f0; 3180 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 3181 } 3182 3183 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 3184 const char *name, void *opaque, 3185 Error **errp) 3186 { 3187 X86CPU *cpu = X86_CPU(obj); 3188 CPUX86State *env = &cpu->env; 3189 int64_t value; 3190 3191 value = env->cpuid_version & 0xf; 3192 visit_type_int(v, name, &value, errp); 3193 } 3194 3195 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 3196 const char *name, void *opaque, 3197 Error **errp) 3198 { 3199 X86CPU *cpu = X86_CPU(obj); 3200 CPUX86State *env = &cpu->env; 3201 const int64_t min = 0; 3202 const int64_t max = 0xf; 3203 Error *local_err = NULL; 3204 int64_t value; 3205 3206 visit_type_int(v, name, &value, &local_err); 3207 if (local_err) { 3208 error_propagate(errp, local_err); 3209 return; 3210 } 3211 if (value < min || value > max) { 3212 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3213 name ? 
name : "null", value, min, max); 3214 return; 3215 } 3216 3217 env->cpuid_version &= ~0xf; 3218 env->cpuid_version |= value & 0xf; 3219 } 3220 3221 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 3222 { 3223 X86CPU *cpu = X86_CPU(obj); 3224 CPUX86State *env = &cpu->env; 3225 char *value; 3226 3227 value = g_malloc(CPUID_VENDOR_SZ + 1); 3228 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 3229 env->cpuid_vendor3); 3230 return value; 3231 } 3232 3233 static void x86_cpuid_set_vendor(Object *obj, const char *value, 3234 Error **errp) 3235 { 3236 X86CPU *cpu = X86_CPU(obj); 3237 CPUX86State *env = &cpu->env; 3238 int i; 3239 3240 if (strlen(value) != CPUID_VENDOR_SZ) { 3241 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 3242 return; 3243 } 3244 3245 env->cpuid_vendor1 = 0; 3246 env->cpuid_vendor2 = 0; 3247 env->cpuid_vendor3 = 0; 3248 for (i = 0; i < 4; i++) { 3249 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 3250 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 3251 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 3252 } 3253 } 3254 3255 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 3256 { 3257 X86CPU *cpu = X86_CPU(obj); 3258 CPUX86State *env = &cpu->env; 3259 char *value; 3260 int i; 3261 3262 value = g_malloc(48 + 1); 3263 for (i = 0; i < 48; i++) { 3264 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 3265 } 3266 value[48] = '\0'; 3267 return value; 3268 } 3269 3270 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 3271 Error **errp) 3272 { 3273 X86CPU *cpu = X86_CPU(obj); 3274 CPUX86State *env = &cpu->env; 3275 int c, len, i; 3276 3277 if (model_id == NULL) { 3278 model_id = ""; 3279 } 3280 len = strlen(model_id); 3281 memset(env->cpuid_model, 0, 48); 3282 for (i = 0; i < 48; i++) { 3283 if (i >= len) { 3284 c = '\0'; 3285 } else { 3286 c = (uint8_t)model_id[i]; 3287 } 3288 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 3289 } 3290 } 3291 
3292 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 3293 void *opaque, Error **errp) 3294 { 3295 X86CPU *cpu = X86_CPU(obj); 3296 int64_t value; 3297 3298 value = cpu->env.tsc_khz * 1000; 3299 visit_type_int(v, name, &value, errp); 3300 } 3301 3302 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 3303 void *opaque, Error **errp) 3304 { 3305 X86CPU *cpu = X86_CPU(obj); 3306 const int64_t min = 0; 3307 const int64_t max = INT64_MAX; 3308 Error *local_err = NULL; 3309 int64_t value; 3310 3311 visit_type_int(v, name, &value, &local_err); 3312 if (local_err) { 3313 error_propagate(errp, local_err); 3314 return; 3315 } 3316 if (value < min || value > max) { 3317 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 3318 name ? name : "null", value, min, max); 3319 return; 3320 } 3321 3322 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 3323 } 3324 3325 /* Generic getter for "feature-words" and "filtered-features" properties */ 3326 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 3327 const char *name, void *opaque, 3328 Error **errp) 3329 { 3330 uint32_t *array = (uint32_t *)opaque; 3331 FeatureWord w; 3332 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 3333 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 3334 X86CPUFeatureWordInfoList *list = NULL; 3335 3336 for (w = 0; w < FEATURE_WORDS; w++) { 3337 FeatureWordInfo *wi = &feature_word_info[w]; 3338 /* 3339 * We didn't have MSR features when "feature-words" was 3340 * introduced. Therefore skipped other type entries. 
3341 */ 3342 if (wi->type != CPUID_FEATURE_WORD) { 3343 continue; 3344 } 3345 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 3346 qwi->cpuid_input_eax = wi->cpuid.eax; 3347 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 3348 qwi->cpuid_input_ecx = wi->cpuid.ecx; 3349 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 3350 qwi->features = array[w]; 3351 3352 /* List will be in reverse order, but order shouldn't matter */ 3353 list_entries[w].next = list; 3354 list_entries[w].value = &word_infos[w]; 3355 list = &list_entries[w]; 3356 } 3357 3358 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 3359 } 3360 3361 /* Convert all '_' in a feature string option name to '-', to make feature 3362 * name conform to QOM property naming rule, which uses '-' instead of '_'. 3363 */ 3364 static inline void feat2prop(char *s) 3365 { 3366 while ((s = strchr(s, '_'))) { 3367 *s = '-'; 3368 } 3369 } 3370 3371 /* Return the feature property name for a feature flag bit */ 3372 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 3373 { 3374 /* XSAVE components are automatically enabled by other features, 3375 * so return the original feature name instead 3376 */ 3377 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 3378 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 3379 3380 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 3381 x86_ext_save_areas[comp].bits) { 3382 w = x86_ext_save_areas[comp].feature; 3383 bitnr = ctz32(x86_ext_save_areas[comp].bits); 3384 } 3385 } 3386 3387 assert(bitnr < 32); 3388 assert(w < FEATURE_WORDS); 3389 return feature_word_info[w].feat_names[bitnr]; 3390 } 3391 3392 /* Compatibily hack to maintain legacy +-feat semantic, 3393 * where +-feat overwrites any feature set by 3394 * feat=on|feat even if the later is parsed after +-feat 3395 * (i.e. 
"-x2apic,x2apic=on" will result in x2apic disabled) 3396 */ 3397 static GList *plus_features, *minus_features; 3398 3399 static gint compare_string(gconstpointer a, gconstpointer b) 3400 { 3401 return g_strcmp0(a, b); 3402 } 3403 3404 /* Parse "+feature,-feature,feature=foo" CPU feature string 3405 */ 3406 static void x86_cpu_parse_featurestr(const char *typename, char *features, 3407 Error **errp) 3408 { 3409 char *featurestr; /* Single 'key=value" string being parsed */ 3410 static bool cpu_globals_initialized; 3411 bool ambiguous = false; 3412 3413 if (cpu_globals_initialized) { 3414 return; 3415 } 3416 cpu_globals_initialized = true; 3417 3418 if (!features) { 3419 return; 3420 } 3421 3422 for (featurestr = strtok(features, ","); 3423 featurestr; 3424 featurestr = strtok(NULL, ",")) { 3425 const char *name; 3426 const char *val = NULL; 3427 char *eq = NULL; 3428 char num[32]; 3429 GlobalProperty *prop; 3430 3431 /* Compatibility syntax: */ 3432 if (featurestr[0] == '+') { 3433 plus_features = g_list_append(plus_features, 3434 g_strdup(featurestr + 1)); 3435 continue; 3436 } else if (featurestr[0] == '-') { 3437 minus_features = g_list_append(minus_features, 3438 g_strdup(featurestr + 1)); 3439 continue; 3440 } 3441 3442 eq = strchr(featurestr, '='); 3443 if (eq) { 3444 *eq++ = 0; 3445 val = eq; 3446 } else { 3447 val = "on"; 3448 } 3449 3450 feat2prop(featurestr); 3451 name = featurestr; 3452 3453 if (g_list_find_custom(plus_features, name, compare_string)) { 3454 warn_report("Ambiguous CPU model string. " 3455 "Don't mix both \"+%s\" and \"%s=%s\"", 3456 name, name, val); 3457 ambiguous = true; 3458 } 3459 if (g_list_find_custom(minus_features, name, compare_string)) { 3460 warn_report("Ambiguous CPU model string. 
" 3461 "Don't mix both \"-%s\" and \"%s=%s\"", 3462 name, name, val); 3463 ambiguous = true; 3464 } 3465 3466 /* Special case: */ 3467 if (!strcmp(name, "tsc-freq")) { 3468 int ret; 3469 uint64_t tsc_freq; 3470 3471 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 3472 if (ret < 0 || tsc_freq > INT64_MAX) { 3473 error_setg(errp, "bad numerical value %s", val); 3474 return; 3475 } 3476 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 3477 val = num; 3478 name = "tsc-frequency"; 3479 } 3480 3481 prop = g_new0(typeof(*prop), 1); 3482 prop->driver = typename; 3483 prop->property = g_strdup(name); 3484 prop->value = g_strdup(val); 3485 qdev_prop_register_global(prop); 3486 } 3487 3488 if (ambiguous) { 3489 warn_report("Compatibility of ambiguous CPU model " 3490 "strings won't be kept on future QEMU versions"); 3491 } 3492 } 3493 3494 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 3495 static int x86_cpu_filter_features(X86CPU *cpu); 3496 3497 /* Build a list with the name of all features on a feature word array */ 3498 static void x86_cpu_list_feature_names(FeatureWordArray features, 3499 strList **feat_names) 3500 { 3501 FeatureWord w; 3502 strList **next = feat_names; 3503 3504 for (w = 0; w < FEATURE_WORDS; w++) { 3505 uint32_t filtered = features[w]; 3506 int i; 3507 for (i = 0; i < 32; i++) { 3508 if (filtered & (1UL << i)) { 3509 strList *new = g_new0(strList, 1); 3510 new->value = g_strdup(x86_cpu_feature_name(w, i)); 3511 *next = new; 3512 next = &new->next; 3513 } 3514 } 3515 } 3516 } 3517 3518 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 3519 const char *name, void *opaque, 3520 Error **errp) 3521 { 3522 X86CPU *xc = X86_CPU(obj); 3523 strList *result = NULL; 3524 3525 x86_cpu_list_feature_names(xc->filtered_features, &result); 3526 visit_type_strList(v, "unavailable-features", &result, errp); 3527 } 3528 3529 /* Check for missing features that may prevent the CPU class from 3530 * running using the current machine 
and accelerator. 3531 */ 3532 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 3533 strList **missing_feats) 3534 { 3535 X86CPU *xc; 3536 Error *err = NULL; 3537 strList **next = missing_feats; 3538 3539 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 3540 strList *new = g_new0(strList, 1); 3541 new->value = g_strdup("kvm"); 3542 *missing_feats = new; 3543 return; 3544 } 3545 3546 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3547 3548 x86_cpu_expand_features(xc, &err); 3549 if (err) { 3550 /* Errors at x86_cpu_expand_features should never happen, 3551 * but in case it does, just report the model as not 3552 * runnable at all using the "type" property. 3553 */ 3554 strList *new = g_new0(strList, 1); 3555 new->value = g_strdup("type"); 3556 *next = new; 3557 next = &new->next; 3558 } 3559 3560 x86_cpu_filter_features(xc); 3561 3562 x86_cpu_list_feature_names(xc->filtered_features, next); 3563 3564 object_unref(OBJECT(xc)); 3565 } 3566 3567 /* Print all cpuid feature names in featureset 3568 */ 3569 static void listflags(GList *features) 3570 { 3571 size_t len = 0; 3572 GList *tmp; 3573 3574 for (tmp = features; tmp; tmp = tmp->next) { 3575 const char *name = tmp->data; 3576 if ((len + strlen(name) + 1) >= 75) { 3577 qemu_printf("\n"); 3578 len = 0; 3579 } 3580 qemu_printf("%s%s", len == 0 ? " " : " ", name); 3581 len += strlen(name) + 1; 3582 } 3583 qemu_printf("\n"); 3584 } 3585 3586 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ 3587 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 3588 { 3589 ObjectClass *class_a = (ObjectClass *)a; 3590 ObjectClass *class_b = (ObjectClass *)b; 3591 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 3592 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 3593 char *name_a, *name_b; 3594 int ret; 3595 3596 if (cc_a->ordering != cc_b->ordering) { 3597 ret = cc_a->ordering - cc_b->ordering; 3598 } else { 3599 name_a = x86_cpu_class_get_model_name(cc_a); 3600 name_b = x86_cpu_class_get_model_name(cc_b); 3601 ret = strcmp(name_a, name_b); 3602 g_free(name_a); 3603 g_free(name_b); 3604 } 3605 return ret; 3606 } 3607 3608 static GSList *get_sorted_cpu_model_list(void) 3609 { 3610 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 3611 list = g_slist_sort(list, x86_cpu_list_compare); 3612 return list; 3613 } 3614 3615 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 3616 { 3617 Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc))); 3618 char *r = object_property_get_str(obj, "model-id", &error_abort); 3619 object_unref(obj); 3620 return r; 3621 } 3622 3623 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 3624 { 3625 X86CPUVersion version; 3626 3627 if (!cc->model || !cc->model->is_alias) { 3628 return NULL; 3629 } 3630 version = x86_cpu_model_resolve_version(cc->model); 3631 if (version <= 0) { 3632 return NULL; 3633 } 3634 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 3635 } 3636 3637 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 3638 { 3639 ObjectClass *oc = data; 3640 X86CPUClass *cc = X86_CPU_CLASS(oc); 3641 char *name = x86_cpu_class_get_model_name(cc); 3642 char *desc = g_strdup(cc->model_description); 3643 char *alias_of = x86_cpu_class_get_alias_of(cc); 3644 3645 if (!desc && alias_of) { 3646 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 3647 desc = g_strdup("(alias configured by machine type)"); 3648 } else { 3649 desc = g_strdup_printf("(alias of %s)", 
alias_of); 3650 } 3651 } 3652 if (!desc) { 3653 desc = x86_cpu_class_get_model_id(cc); 3654 } 3655 3656 qemu_printf("x86 %-20s %-48s\n", name, desc); 3657 g_free(name); 3658 g_free(desc); 3659 g_free(alias_of); 3660 } 3661 3662 /* list available CPU models and flags */ 3663 void x86_cpu_list(void) 3664 { 3665 int i, j; 3666 GSList *list; 3667 GList *names = NULL; 3668 3669 qemu_printf("Available CPUs:\n"); 3670 list = get_sorted_cpu_model_list(); 3671 g_slist_foreach(list, x86_cpu_list_entry, NULL); 3672 g_slist_free(list); 3673 3674 names = NULL; 3675 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 3676 FeatureWordInfo *fw = &feature_word_info[i]; 3677 for (j = 0; j < 32; j++) { 3678 if (fw->feat_names[j]) { 3679 names = g_list_append(names, (gpointer)fw->feat_names[j]); 3680 } 3681 } 3682 } 3683 3684 names = g_list_sort(names, (GCompareFunc)strcmp); 3685 3686 qemu_printf("\nRecognized CPUID flags:\n"); 3687 listflags(names); 3688 qemu_printf("\n"); 3689 g_list_free(names); 3690 } 3691 3692 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 3693 { 3694 ObjectClass *oc = data; 3695 X86CPUClass *cc = X86_CPU_CLASS(oc); 3696 CpuDefinitionInfoList **cpu_list = user_data; 3697 CpuDefinitionInfoList *entry; 3698 CpuDefinitionInfo *info; 3699 3700 info = g_malloc0(sizeof(*info)); 3701 info->name = x86_cpu_class_get_model_name(cc); 3702 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 3703 info->has_unavailable_features = true; 3704 info->q_typename = g_strdup(object_class_get_name(oc)); 3705 info->migration_safe = cc->migration_safe; 3706 info->has_migration_safe = true; 3707 info->q_static = cc->static_model; 3708 /* 3709 * Old machine types won't report aliases, so that alias translation 3710 * doesn't break compatibility with previous QEMU versions. 
3711 */ 3712 if (default_cpu_version != CPU_VERSION_LEGACY) { 3713 info->alias_of = x86_cpu_class_get_alias_of(cc); 3714 info->has_alias_of = !!info->alias_of; 3715 } 3716 3717 entry = g_malloc0(sizeof(*entry)); 3718 entry->value = info; 3719 entry->next = *cpu_list; 3720 *cpu_list = entry; 3721 } 3722 3723 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 3724 { 3725 CpuDefinitionInfoList *cpu_list = NULL; 3726 GSList *list = get_sorted_cpu_model_list(); 3727 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 3728 g_slist_free(list); 3729 return cpu_list; 3730 } 3731 3732 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, 3733 bool migratable_only) 3734 { 3735 FeatureWordInfo *wi = &feature_word_info[w]; 3736 uint32_t r = 0; 3737 3738 if (kvm_enabled()) { 3739 switch (wi->type) { 3740 case CPUID_FEATURE_WORD: 3741 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 3742 wi->cpuid.ecx, 3743 wi->cpuid.reg); 3744 break; 3745 case MSR_FEATURE_WORD: 3746 r = kvm_arch_get_supported_msr_feature(kvm_state, 3747 wi->msr.index); 3748 break; 3749 } 3750 } else if (hvf_enabled()) { 3751 if (wi->type != CPUID_FEATURE_WORD) { 3752 return 0; 3753 } 3754 r = hvf_get_supported_cpuid(wi->cpuid.eax, 3755 wi->cpuid.ecx, 3756 wi->cpuid.reg); 3757 } else if (tcg_enabled()) { 3758 r = wi->tcg_features; 3759 } else { 3760 return ~0; 3761 } 3762 if (migratable_only) { 3763 r &= x86_cpu_get_migratable_flags(w); 3764 } 3765 return r; 3766 } 3767 3768 static void x86_cpu_report_filtered_features(X86CPU *cpu) 3769 { 3770 FeatureWord w; 3771 3772 for (w = 0; w < FEATURE_WORDS; w++) { 3773 report_unavailable_features(w, cpu->filtered_features[w]); 3774 } 3775 } 3776 3777 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 3778 { 3779 PropValue *pv; 3780 for (pv = props; pv->prop; pv++) { 3781 if (!pv->value) { 3782 continue; 3783 } 3784 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 3785 &error_abort); 3786 } 3787 } 3788 3789 /* 
Apply properties for the CPU model version specified in model */ 3790 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 3791 { 3792 const X86CPUVersionDefinition *vdef; 3793 X86CPUVersion version = x86_cpu_model_resolve_version(model); 3794 3795 if (version == CPU_VERSION_LEGACY) { 3796 return; 3797 } 3798 3799 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 3800 PropValue *p; 3801 3802 for (p = vdef->props; p && p->prop; p++) { 3803 object_property_parse(OBJECT(cpu), p->value, p->prop, 3804 &error_abort); 3805 } 3806 3807 if (vdef->version == version) { 3808 break; 3809 } 3810 } 3811 3812 /* 3813 * If we reached the end of the list, version number was invalid 3814 */ 3815 assert(vdef->version == version); 3816 } 3817 3818 /* Load data from X86CPUDefinition into a X86CPU object 3819 */ 3820 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp) 3821 { 3822 X86CPUDefinition *def = model->cpudef; 3823 CPUX86State *env = &cpu->env; 3824 const char *vendor; 3825 char host_vendor[CPUID_VENDOR_SZ + 1]; 3826 FeatureWord w; 3827 3828 /*NOTE: any property set by this function should be returned by 3829 * x86_cpu_static_props(), so static expansion of 3830 * query-cpu-model-expansion is always complete. 
3831 */ 3832 3833 /* CPU models only set _minimum_ values for level/xlevel: */ 3834 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp); 3835 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp); 3836 3837 object_property_set_int(OBJECT(cpu), def->family, "family", errp); 3838 object_property_set_int(OBJECT(cpu), def->model, "model", errp); 3839 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); 3840 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); 3841 for (w = 0; w < FEATURE_WORDS; w++) { 3842 env->features[w] = def->features[w]; 3843 } 3844 3845 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 3846 cpu->legacy_cache = !def->cache_info; 3847 3848 /* Special cases not set in the X86CPUDefinition structs: */ 3849 /* TODO: in-kernel irqchip for hvf */ 3850 if (kvm_enabled()) { 3851 if (!kvm_irqchip_in_kernel()) { 3852 x86_cpu_change_kvm_default("x2apic", "off"); 3853 } 3854 3855 x86_cpu_apply_props(cpu, kvm_default_props); 3856 } else if (tcg_enabled()) { 3857 x86_cpu_apply_props(cpu, tcg_default_props); 3858 } 3859 3860 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 3861 3862 /* sysenter isn't supported in compatibility mode on AMD, 3863 * syscall isn't supported in compatibility mode on Intel. 
3864 * Normally we advertise the actual CPU vendor, but you can 3865 * override this using the 'vendor' property if you want to use 3866 * KVM's sysenter/syscall emulation in compatibility mode and 3867 * when doing cross vendor migration 3868 */ 3869 vendor = def->vendor; 3870 if (accel_uses_host_cpuid()) { 3871 uint32_t ebx = 0, ecx = 0, edx = 0; 3872 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 3873 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 3874 vendor = host_vendor; 3875 } 3876 3877 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp); 3878 3879 x86_cpu_apply_version_props(cpu, model); 3880 } 3881 3882 #ifndef CONFIG_USER_ONLY 3883 /* Return a QDict containing keys for all properties that can be included 3884 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 3885 * must be included in the dictionary. 3886 */ 3887 static QDict *x86_cpu_static_props(void) 3888 { 3889 FeatureWord w; 3890 int i; 3891 static const char *props[] = { 3892 "min-level", 3893 "min-xlevel", 3894 "family", 3895 "model", 3896 "stepping", 3897 "model-id", 3898 "vendor", 3899 "lmce", 3900 NULL, 3901 }; 3902 static QDict *d; 3903 3904 if (d) { 3905 return d; 3906 } 3907 3908 d = qdict_new(); 3909 for (i = 0; props[i]; i++) { 3910 qdict_put_null(d, props[i]); 3911 } 3912 3913 for (w = 0; w < FEATURE_WORDS; w++) { 3914 FeatureWordInfo *fi = &feature_word_info[w]; 3915 int bit; 3916 for (bit = 0; bit < 32; bit++) { 3917 if (!fi->feat_names[bit]) { 3918 continue; 3919 } 3920 qdict_put_null(d, fi->feat_names[bit]); 3921 } 3922 } 3923 3924 return d; 3925 } 3926 3927 /* Add an entry to @props dict, with the value for property. 
 */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    /* error_abort: the property is expected to exist on the CPU object */
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    /* Copy the current value of every static-expansion property into @props */
    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}

/* Apply every key/value pair in @props as a QOM property on @obj.
 * Stops at the first failing property and propagates that error.
 */
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                    qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}

/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    /* Expand host/accelerator-dependent data; no host-based filtering here */
    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}

/* QMP query-cpu-model-expansion: expand @model to a static ("base"-relative)
 * or full property dictionary.  Returns NULL and sets @errp on failure;
 * on success the caller owns the returned CpuModelExpansionInfo.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                              CpuModelInfo *model,
                              Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): this runs unconditionally after the switch, so the
     * STATIC case calls x86_cpu_to_dict() twice (re-putting identical
     * entries) and the FULL case overlays static props on the full dict.
     * Looks redundant for STATIC — confirm whether it is intentional.
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
#endif /* !CONFIG_USER_ONLY */

/* gdb architecture name for this target; caller frees the returned string */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}

/* class_init for the per-model CPU types registered below; @data is the
 * X86CPUModel this type represents.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUModel *model = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model = model;
    xcc->migration_safe = true;
}

/* Register one QOM type for CPU model @name backed by @model */
static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
{
    char *typename = x86_cpu_type_name(name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = model,
    };

    type_register(&ti);
    g_free(typename);
}

/* Register the unversioned model, every versioned variant, and any
 * version aliases for CPU definition @def.
 */
static void x86_register_cpudef_types(X86CPUDefinition *def)
{
    X86CPUModel
                *m;
    const X86CPUVersionDefinition *vdef;
    char *name;

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    /* Unversioned model: */
    m = g_new0(X86CPUModel, 1);
    m->cpudef = def;
    m->version = CPU_VERSION_AUTO;
    m->is_alias = true;
    x86_register_cpu_model_type(def->name, m);

    /* Versioned models: */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        m->cpudef = def;
        m->version = vdef->version;
        name = x86_cpu_versioned_model_name(def, vdef->version);
        x86_register_cpu_model_type(name, m);
        g_free(name);

        /* A version may also carry an alias name (e.g. old spelling) */
        if (vdef->alias) {
            X86CPUModel *am = g_new0(X86CPUModel, 1);
            am->cpudef = def;
            am->version = vdef->version;
            am->is_alias = true;
            x86_register_cpu_model_type(vdef->alias, am);
        }
    }

}

#if !defined(CONFIG_USER_ONLY)

void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/* Emulate the CPUID instruction: fill @eax..@edx for leaf @index,
 * sub-leaf @count, based on the configured feature words and cache/
 * topology info in @env.  Out-of-range leaves are clamped per the
 * Intel SDM (treated as EAX=cpuid_level).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;      /* Centaur/VIA range */
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;       /* extended range */
    } else if (index >= 0x40000000) {
        limit = 0x40000001;              /* hypervisor range */
    } else {
        limit = env->cpuid_level;        /* basic range */
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string, in the EBX/EDX/ECX register order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            /* OSXSAVE mirrors the guest's CR4.OSXSAVE at read time */
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                               cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                /* OSPKE mirrors the guest's CR4.PKE at read time */
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            /* Leaf 0x1F only exposed for multi-die configurations */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                      cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (per-subleaf cache descriptors) */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology extensions */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: bit 1 of EAX = SEV supported; EBX = C-bit position
         * and number of physical address bits lost to memory encryption
         */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset() — put the CPU into its architectural power-on state */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields after end_reset_fields survive reset (e.g. configuration) */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode reset segments; CS base points at the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode.  */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU.  */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor, per its APIC base MSR */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Enable MCE (machine-check) capability on family >= 6 CPUs that
 * advertise both MCE and MCA in CPUID[1].EDX.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* mce_banks layout: 4 registers per bank; index 0 is MCi_CTL */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Select the APIC implementation class matching the current accelerator */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create this CPU's local APIC device and attach it as a QOM child */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds the reference; drop ours */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to
       link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize this CPU's APIC; on the first call also map the APIC MMIO
 * window (shared by all CPUs) into the system address space.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;   /* map the MMIO region only once */

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

/* machine-done notifier: alias the machine's SMRAM region into this
 * CPU's address space, if the machine provides one.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* No APIC device in user-mode emulation */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to @value if it is currently lower (monotonic max) */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    uint32_t region = eax & 0xF0000000;   /* basic/extended/Centaur range */

    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    if (!env->features[w]) {
        /* No feature in this word is enabled; nothing to advertise */
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    /* One bit per extended save area whose controlling feature is enabled */
    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 *
involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_model() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply legacy +feature/-feature command-line flags */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
            kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* CPU topology with multi-dies support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        uint32_t available_features = requested_features & host_feat;
        /* x-force-features keeps unsupported bits set for debugging;
         * otherwise clamp the feature word to what the host provides.
         */
        if (!cpu->force_features) {
            env->features[w] = available_features;
        }
        /* Remember which requested bits had to be dropped, for reporting. */
        cpu->filtered_features[w] = requested_features & ~available_features;
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        /* CPUID leaf 0x14: Intel Processor Trace enumeration. */
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
            ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
            ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
            ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
            ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
            ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
            (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}

/* Realize callback for TYPE_X86_CPU: expands and filters CPUID data,
 * sets up phys-bits and cache info, creates the APIC and (under TCG)
 * the per-CPU address spaces, then starts the vCPU.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off is only possible when the model carries its
         * own cache description.
         */
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* Create a local APIC when the CPU advertises one or on SMP guests. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
            warn_report("This family of AMD CPU doesn't support "
                        "hyperthreading(%d)",
                        cs->nr_threads);
            error_printf("Please configure -smp options properly"
                         " or try enabling topoext feature.\n");
            ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* Unrealize callback: tears down the vCPU thread, reset handler and APIC,
 * then chains to the parent class unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* Opaque data for a per-feature-bit QOM boolean property: which feature
 * word and which bit(s) within it the property controls.
 */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

/* QOM getter: true only if ALL bits in the property's mask are set. */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: sets/clears the masked bits and records them as
 * user-specified so auto-enabling logic won't override them.
 */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    /* Feature bits are immutable once the device is realized. */
    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    /* Mark the bits as explicitly chosen by the user. */
    cpu->env.user_features[fp->w] |= fp->mask;
}

/* Release hook: frees the BitProperty allocated in
 * x86_cpu_register_bit_prop() when the property is removed.
 */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Property already exists: just widen its mask. */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

/* Register the QOM property for one named feature bit, if the bit has a
 * name in feature_word_info.
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
5533 * Old names containing underscores are registered as aliases 5534 * using object_property_add_alias() 5535 */ 5536 assert(!strchr(name, '_')); 5537 /* aliases don't use "|" delimiters anymore, they are registered 5538 * manually using object_property_add_alias() */ 5539 assert(!strchr(name, '|')); 5540 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 5541 } 5542 5543 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 5544 { 5545 X86CPU *cpu = X86_CPU(cs); 5546 CPUX86State *env = &cpu->env; 5547 GuestPanicInformation *panic_info = NULL; 5548 5549 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 5550 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 5551 5552 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 5553 5554 assert(HV_CRASH_PARAMS >= 5); 5555 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 5556 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 5557 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 5558 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 5559 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 5560 } 5561 5562 return panic_info; 5563 } 5564 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 5565 const char *name, void *opaque, 5566 Error **errp) 5567 { 5568 CPUState *cs = CPU(obj); 5569 GuestPanicInformation *panic_info; 5570 5571 if (!cs->crash_occurred) { 5572 error_setg(errp, "No crash occured"); 5573 return; 5574 } 5575 5576 panic_info = x86_cpu_get_crash_info(cs); 5577 if (panic_info == NULL) { 5578 error_setg(errp, "No crash information"); 5579 return; 5580 } 5581 5582 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 5583 errp); 5584 qapi_free_GuestPanicInformation(panic_info); 5585 } 5586 5587 static void x86_cpu_initfn(Object *obj) 5588 { 5589 X86CPU *cpu = X86_CPU(obj); 5590 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5591 CPUX86State *env = &cpu->env; 5592 FeatureWord w; 5593 5594 env->nr_dies = 1; 5595 
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    /* Register one boolean property per named feature bit. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of canonical feature names. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings kept for backward compatibility. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}

/* Architecture-defined CPU id: on x86 this is the APIC ID. */
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

/* True when CR0.PG is set (paging enabled). */
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

/* CPUClass::set_pc hook: set EIP directly. */
static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

/* Recover EIP from a TranslationBlock (tb->pc is CS.base-relative). */
static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

/* Return the highest-priority pending interrupt among interrupt_request
 * bits that can currently be taken, or 0 if none can be delivered.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below is gated on the SVM global interrupt flag. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

/* CPUClass::has_work hook: any deliverable pending interrupt counts. */
static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

/* Configure the disassembler for the CPU's current mode (16/32/64-bit),
 * for both the bfd and the capstone backends.
 */
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ?
CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

/* Recompute the lazily-cached env->hflags bits (CPL, PE/MP/EM/TS, CS/SS
 * size, LMA, ADDSEG, ...) from the authoritative state in env.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is taken from the SS descriptor's DPL. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: force 32-bit stack/code size flags too. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments, all off by default. */
    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not explicitly set"; resolved from cpuid_min_*
     * in x86_cpu_expand_features().
     */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init shared by all X86CPU subclasses: installs the realize/reset
 * overrides and fills in the CPUClass method table.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

/* Abstract base type; concrete CPU models are registered as subclasses. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};


/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};

/* Register the abstract base type, all built-in CPU model definitions,
 * and the special "max"/"base" (and, with KVM/HVF, "host") types.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)