1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/hvf.h" 30 #include "sysemu/cpus.h" 31 #include "kvm_i386.h" 32 #include "sev_i386.h" 33 34 #include "qemu/error-report.h" 35 #include "qemu/module.h" 36 #include "qemu/option.h" 37 #include "qemu/config-file.h" 38 #include "qapi/error.h" 39 #include "qapi/qapi-visit-machine.h" 40 #include "qapi/qapi-visit-run-state.h" 41 #include "qapi/qmp/qdict.h" 42 #include "qapi/qmp/qerror.h" 43 #include "qapi/visitor.h" 44 #include "qom/qom-qobject.h" 45 #include "sysemu/arch_init.h" 46 #include "qapi/qapi-commands-machine-target.h" 47 48 #include "standard-headers/asm-x86/kvm_para.h" 49 50 #include "sysemu/sysemu.h" 51 #include "sysemu/tcg.h" 52 #include "hw/qdev-properties.h" 53 #include "hw/i386/topology.h" 54 #ifndef CONFIG_USER_ONLY 55 #include "exec/address-spaces.h" 56 #include "hw/hw.h" 57 #include "hw/xen/xen.h" 58 #include "hw/i386/apic_internal.h" 59 #include "hw/boards.h" 60 #endif 61 62 #include "disas/capstone.h" 63 64 /* Helpers for building CPUID[2] 
descriptors: */ 65 66 struct CPUID2CacheDescriptorInfo { 67 enum CacheType type; 68 int level; 69 int size; 70 int line_size; 71 int associativity; 72 }; 73 74 /* 75 * Known CPUID 2 cache descriptors. 76 * From Intel SDM Volume 2A, CPUID instruction 77 */ 78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 82 .associativity = 4, .line_size = 32, }, 83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 84 .associativity = 4, .line_size = 64, }, 85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 86 .associativity = 2, .line_size = 32, }, 87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 32, }, 89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 90 .associativity = 4, .line_size = 64, }, 91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 92 .associativity = 6, .line_size = 64, }, 93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 94 .associativity = 2, .line_size = 64, }, 95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 96 .associativity = 8, .line_size = 64, }, 97 /* lines per sector is not supported cpuid2_cache_descriptor(), 98 * so descriptors 0x22, 0x23 are not included 99 */ 100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 101 .associativity = 16, .line_size = 64, }, 102 /* lines per sector is not supported cpuid2_cache_descriptor(), 103 * so descriptors 0x25, 0x20 are not included 104 */ 105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 108 .associativity = 8, .line_size = 64, }, 109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 110 .associativity = 4, .line_size 
= 32, }, 111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 112 .associativity = 4, .line_size = 32, }, 113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 118 .associativity = 4, .line_size = 32, }, 119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 120 .associativity = 4, .line_size = 64, }, 121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 122 .associativity = 8, .line_size = 64, }, 123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 129 .associativity = 16, .line_size = 64, }, 130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 131 .associativity = 12, .line_size = 64, }, 132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 133 .associativity = 16, .line_size = 64, }, 134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 135 .associativity = 24, .line_size = 64, }, 136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 137 .associativity = 8, .line_size = 64, }, 138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 141 .associativity = 4, .line_size = 64, }, 142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 143 .associativity = 4, .line_size = 64, }, 144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 145 .associativity = 4, .line_size = 64, }, 146 /* lines per sector 
is not supported cpuid2_cache_descriptor(), 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 148 */ 149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 150 .associativity = 8, .line_size = 64, }, 151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 2, .line_size = 64, }, 153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 154 .associativity = 8, .line_size = 64, }, 155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 162 .associativity = 8, .line_size = 32, }, 163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 164 .associativity = 4, .line_size = 64, }, 165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 166 .associativity = 8, .line_size = 64, }, 167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 172 .associativity = 4, .line_size = 64, }, 173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 178 .associativity = 8, .line_size = 64, }, 179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 182 .associativity = 12, 
.line_size = 64, }, 183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 184 .associativity = 12, .line_size = 64, }, 185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 190 .associativity = 16, .line_size = 64, }, 191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 196 .associativity = 24, .line_size = 64, }, 197 }; 198 199 /* 200 * "CPUID leaf 2 does not report cache descriptor information, 201 * use CPUID leaf 4 to query cache parameters" 202 */ 203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 204 205 /* 206 * Return a CPUID 2 cache descriptor for a given cache. 
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 208 */ 209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 210 { 211 int i; 212 213 assert(cache->size > 0); 214 assert(cache->level > 0); 215 assert(cache->line_size > 0); 216 assert(cache->associativity > 0); 217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 219 if (d->level == cache->level && d->type == cache->type && 220 d->size == cache->size && d->line_size == cache->line_size && 221 d->associativity == cache->associativity) { 222 return i; 223 } 224 } 225 226 return CACHE_DESCRIPTOR_UNAVAILABLE; 227 } 228 229 /* CPUID Leaf 4 constants: */ 230 231 /* EAX: */ 232 #define CACHE_TYPE_D 1 233 #define CACHE_TYPE_I 2 234 #define CACHE_TYPE_UNIFIED 3 235 236 #define CACHE_LEVEL(l) (l << 5) 237 238 #define CACHE_SELF_INIT_LEVEL (1 << 8) 239 240 /* EDX: */ 241 #define CACHE_NO_INVD_SHARING (1 << 0) 242 #define CACHE_INCLUSIVE (1 << 1) 243 #define CACHE_COMPLEX_IDX (1 << 2) 244 245 /* Encode CacheType for CPUID[4].EAX */ 246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 249 0 /* Invalid value */) 250 251 252 /* Encode cache info for CPUID[4] */ 253 static void encode_cache_cpuid4(CPUCacheInfo *cache, 254 int num_apic_ids, int num_cores, 255 uint32_t *eax, uint32_t *ebx, 256 uint32_t *ecx, uint32_t *edx) 257 { 258 assert(cache->size == cache->line_size * cache->associativity * 259 cache->partitions * cache->sets); 260 261 assert(num_apic_ids > 0); 262 *eax = CACHE_TYPE(cache->type) | 263 CACHE_LEVEL(cache->level) | 264 (cache->self_init ? 
CACHE_SELF_INIT_LEVEL : 0) | 265 ((num_cores - 1) << 26) | 266 ((num_apic_ids - 1) << 14); 267 268 assert(cache->line_size > 0); 269 assert(cache->partitions > 0); 270 assert(cache->associativity > 0); 271 /* We don't implement fully-associative caches */ 272 assert(cache->associativity < cache->sets); 273 *ebx = (cache->line_size - 1) | 274 ((cache->partitions - 1) << 12) | 275 ((cache->associativity - 1) << 22); 276 277 assert(cache->sets > 0); 278 *ecx = cache->sets - 1; 279 280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 281 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 283 } 284 285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 287 { 288 assert(cache->size % 1024 == 0); 289 assert(cache->lines_per_tag > 0); 290 assert(cache->associativity > 0); 291 assert(cache->line_size > 0); 292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 293 (cache->lines_per_tag << 8) | (cache->line_size); 294 } 295 296 #define ASSOC_FULL 0xFF 297 298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 300 a == 2 ? 0x2 : \ 301 a == 4 ? 0x4 : \ 302 a == 8 ? 0x6 : \ 303 a == 16 ? 0x8 : \ 304 a == 32 ? 0xA : \ 305 a == 48 ? 0xB : \ 306 a == 64 ? 0xC : \ 307 a == 96 ? 0xD : \ 308 a == 128 ? 0xE : \ 309 a == ASSOC_FULL ? 0xF : \ 310 0 /* invalid value */) 311 312 /* 313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 314 * @l3 can be NULL. 
315 */ 316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 317 CPUCacheInfo *l3, 318 uint32_t *ecx, uint32_t *edx) 319 { 320 assert(l2->size % 1024 == 0); 321 assert(l2->associativity > 0); 322 assert(l2->lines_per_tag > 0); 323 assert(l2->line_size > 0); 324 *ecx = ((l2->size / 1024) << 16) | 325 (AMD_ENC_ASSOC(l2->associativity) << 12) | 326 (l2->lines_per_tag << 8) | (l2->line_size); 327 328 if (l3) { 329 assert(l3->size % (512 * 1024) == 0); 330 assert(l3->associativity > 0); 331 assert(l3->lines_per_tag > 0); 332 assert(l3->line_size > 0); 333 *edx = ((l3->size / (512 * 1024)) << 18) | 334 (AMD_ENC_ASSOC(l3->associativity) << 12) | 335 (l3->lines_per_tag << 8) | (l3->line_size); 336 } else { 337 *edx = 0; 338 } 339 } 340 341 /* 342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 344 * Define the constants to build the cpu topology. Right now, TOPOEXT 345 * feature is enabled only on EPYC. So, these constants are based on 346 * EPYC supported configurations. We may need to handle the cases if 347 * these values change in future. 348 */ 349 /* Maximum core complexes in a node */ 350 #define MAX_CCX 2 351 /* Maximum cores in a core complex */ 352 #define MAX_CORES_IN_CCX 4 353 /* Maximum cores in a node */ 354 #define MAX_CORES_IN_NODE 8 355 /* Maximum nodes in a socket */ 356 #define MAX_NODES_PER_SOCKET 4 357 358 /* 359 * Figure out the number of nodes required to build this config. 360 * Max cores in a node is 8 361 */ 362 static int nodes_in_socket(int nr_cores) 363 { 364 int nodes; 365 366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); 367 368 /* Hardware does not support config with 3 nodes, return 4 in that case */ 369 return (nodes == 3) ? 
4 : nodes; 370 } 371 372 /* 373 * Decide the number of cores in a core complex with the given nr_cores using 374 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and 375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible 376 * L3 cache is shared across all cores in a core complex. So, this will also 377 * tell us how many cores are sharing the L3 cache. 378 */ 379 static int cores_in_core_complex(int nr_cores) 380 { 381 int nodes; 382 383 /* Check if we can fit all the cores in one core complex */ 384 if (nr_cores <= MAX_CORES_IN_CCX) { 385 return nr_cores; 386 } 387 /* Get the number of nodes required to build this config */ 388 nodes = nodes_in_socket(nr_cores); 389 390 /* 391 * Divide the cores accros all the core complexes 392 * Return rounded up value 393 */ 394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); 395 } 396 397 /* Encode cache info for CPUID[8000001D] */ 398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs, 399 uint32_t *eax, uint32_t *ebx, 400 uint32_t *ecx, uint32_t *edx) 401 { 402 uint32_t l3_cores; 403 assert(cache->size == cache->line_size * cache->associativity * 404 cache->partitions * cache->sets); 405 406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 408 409 /* L3 is shared among multiple cores */ 410 if (cache->level == 3) { 411 l3_cores = cores_in_core_complex(cs->nr_cores); 412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14; 413 } else { 414 *eax |= ((cs->nr_threads - 1) << 14); 415 } 416 417 assert(cache->line_size > 0); 418 assert(cache->partitions > 0); 419 assert(cache->associativity > 0); 420 /* We don't implement fully-associative caches */ 421 assert(cache->associativity < cache->sets); 422 *ebx = (cache->line_size - 1) | 423 ((cache->partitions - 1) << 12) | 424 ((cache->associativity - 1) << 22); 425 426 assert(cache->sets > 0); 427 *ecx = cache->sets - 1; 428 429 *edx = (cache->no_invd_sharing ? 
CACHE_NO_INVD_SHARING : 0) | 430 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 432 } 433 434 /* Data structure to hold the configuration info for a given core index */ 435 struct core_topology { 436 /* core complex id of the current core index */ 437 int ccx_id; 438 /* 439 * Adjusted core index for this core in the topology 440 * This can be 0,1,2,3 with max 4 cores in a core complex 441 */ 442 int core_id; 443 /* Node id for this core index */ 444 int node_id; 445 /* Number of nodes in this config */ 446 int num_nodes; 447 }; 448 449 /* 450 * Build the configuration closely match the EPYC hardware. Using the EPYC 451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE) 452 * right now. This could change in future. 453 * nr_cores : Total number of cores in the config 454 * core_id : Core index of the current CPU 455 * topo : Data structure to hold all the config info for this core index 456 */ 457 static void build_core_topology(int nr_cores, int core_id, 458 struct core_topology *topo) 459 { 460 int nodes, cores_in_ccx; 461 462 /* First get the number of nodes required */ 463 nodes = nodes_in_socket(nr_cores); 464 465 cores_in_ccx = cores_in_core_complex(nr_cores); 466 467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX); 468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx; 469 topo->core_id = core_id % cores_in_ccx; 470 topo->num_nodes = nodes; 471 } 472 473 /* Encode cache info for CPUID[8000001E] */ 474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu, 475 uint32_t *eax, uint32_t *ebx, 476 uint32_t *ecx, uint32_t *edx) 477 { 478 struct core_topology topo = {0}; 479 unsigned long nodes; 480 int shift; 481 482 build_core_topology(cs->nr_cores, cpu->core_id, &topo); 483 *eax = cpu->apic_id; 484 /* 485 * CPUID_Fn8000001E_EBX 486 * 31:16 Reserved 487 * 15:8 Threads per core (The number of threads per core is 488 * Threads per core + 1) 489 * 7:0 Core id 
(see bit decoding below) 490 * SMT: 491 * 4:3 node id 492 * 2 Core complex id 493 * 1:0 Core id 494 * Non SMT: 495 * 5:4 node id 496 * 3 Core complex id 497 * 1:0 Core id 498 */ 499 if (cs->nr_threads - 1) { 500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) | 501 (topo.ccx_id << 2) | topo.core_id; 502 } else { 503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id; 504 } 505 /* 506 * CPUID_Fn8000001E_ECX 507 * 31:11 Reserved 508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 509 * 7:0 Node id (see bit decoding below) 510 * 2 Socket id 511 * 1:0 Node id 512 */ 513 if (topo.num_nodes <= 4) { 514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | 515 topo.node_id; 516 } else { 517 /* 518 * Node id fix up. Actual hardware supports up to 4 nodes. But with 519 * more than 32 cores, we may end up with more than 4 nodes. 520 * Node id is a combination of socket id and node id. Only requirement 521 * here is that this number should be unique accross the system. 522 * Shift the socket id to accommodate more nodes. We dont expect both 523 * socket id and node id to be big number at the same time. This is not 524 * an ideal config but we need to to support it. Max nodes we can have 525 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 526 * 5 bits for nodes. Find the left most set bit to represent the total 527 * number of nodes. find_last_bit returns last set bit(0 based). Left 528 * shift(+1) the socket id to represent all the nodes. 529 */ 530 nodes = topo.num_nodes - 1; 531 shift = find_last_bit(&nodes, 8); 532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) | 533 topo.node_id; 534 } 535 *edx = 0; 536 } 537 538 /* 539 * Definitions of the hardcoded cache entries we expose: 540 * These are legacy cache values. 
If there is a need to change any 541 * of these values please use builtin_x86_defs 542 */ 543 544 /* L1 data cache: */ 545 static CPUCacheInfo legacy_l1d_cache = { 546 .type = DATA_CACHE, 547 .level = 1, 548 .size = 32 * KiB, 549 .self_init = 1, 550 .line_size = 64, 551 .associativity = 8, 552 .sets = 64, 553 .partitions = 1, 554 .no_invd_sharing = true, 555 }; 556 557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 558 static CPUCacheInfo legacy_l1d_cache_amd = { 559 .type = DATA_CACHE, 560 .level = 1, 561 .size = 64 * KiB, 562 .self_init = 1, 563 .line_size = 64, 564 .associativity = 2, 565 .sets = 512, 566 .partitions = 1, 567 .lines_per_tag = 1, 568 .no_invd_sharing = true, 569 }; 570 571 /* L1 instruction cache: */ 572 static CPUCacheInfo legacy_l1i_cache = { 573 .type = INSTRUCTION_CACHE, 574 .level = 1, 575 .size = 32 * KiB, 576 .self_init = 1, 577 .line_size = 64, 578 .associativity = 8, 579 .sets = 64, 580 .partitions = 1, 581 .no_invd_sharing = true, 582 }; 583 584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 585 static CPUCacheInfo legacy_l1i_cache_amd = { 586 .type = INSTRUCTION_CACHE, 587 .level = 1, 588 .size = 64 * KiB, 589 .self_init = 1, 590 .line_size = 64, 591 .associativity = 2, 592 .sets = 512, 593 .partitions = 1, 594 .lines_per_tag = 1, 595 .no_invd_sharing = true, 596 }; 597 598 /* Level 2 unified cache: */ 599 static CPUCacheInfo legacy_l2_cache = { 600 .type = UNIFIED_CACHE, 601 .level = 2, 602 .size = 4 * MiB, 603 .self_init = 1, 604 .line_size = 64, 605 .associativity = 16, 606 .sets = 4096, 607 .partitions = 1, 608 .no_invd_sharing = true, 609 }; 610 611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 612 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 613 .type = UNIFIED_CACHE, 614 .level = 2, 615 .size = 2 * MiB, 616 .line_size = 64, 617 .associativity = 8, 618 }; 619 620 621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 622 static CPUCacheInfo 
legacy_l2_cache_amd = { 623 .type = UNIFIED_CACHE, 624 .level = 2, 625 .size = 512 * KiB, 626 .line_size = 64, 627 .lines_per_tag = 1, 628 .associativity = 16, 629 .sets = 512, 630 .partitions = 1, 631 }; 632 633 /* Level 3 unified cache: */ 634 static CPUCacheInfo legacy_l3_cache = { 635 .type = UNIFIED_CACHE, 636 .level = 3, 637 .size = 16 * MiB, 638 .line_size = 64, 639 .associativity = 16, 640 .sets = 16384, 641 .partitions = 1, 642 .lines_per_tag = 1, 643 .self_init = true, 644 .inclusive = true, 645 .complex_indexing = true, 646 }; 647 648 /* TLB definitions: */ 649 650 #define L1_DTLB_2M_ASSOC 1 651 #define L1_DTLB_2M_ENTRIES 255 652 #define L1_DTLB_4K_ASSOC 1 653 #define L1_DTLB_4K_ENTRIES 255 654 655 #define L1_ITLB_2M_ASSOC 1 656 #define L1_ITLB_2M_ENTRIES 255 657 #define L1_ITLB_4K_ASSOC 1 658 #define L1_ITLB_4K_ENTRIES 255 659 660 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 662 #define L2_DTLB_4K_ASSOC 4 663 #define L2_DTLB_4K_ENTRIES 512 664 665 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 667 #define L2_ITLB_4K_ASSOC 4 668 #define L2_ITLB_4K_ENTRIES 512 669 670 /* CPUID Leaf 0x14 constants: */ 671 #define INTEL_PT_MAX_SUBLEAF 0x1 672 /* 673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 674 * MSR can be accessed; 675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 677 * of Intel PT MSRs across warm reset; 678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 679 */ 680 #define INTEL_PT_MINIMAL_EBX 0xf 681 /* 682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 684 * accessed; 685 * bit[01]: ToPA tables can hold any number of output entries, up to the 686 * maximum allowed by the MaskOrTableOffset field of 687 * 
IA32_RTIT_OUTPUT_MASK_PTRS; 688 * bit[02]: Support Single-Range Output scheme; 689 */ 690 #define INTEL_PT_MINIMAL_ECX 0x7 691 /* generated packets which contain IP payloads have LIP values */ 692 #define INTEL_PT_IP_LIP (1 << 31) 693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 698 699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 700 uint32_t vendor2, uint32_t vendor3) 701 { 702 int i; 703 for (i = 0; i < 4; i++) { 704 dst[i] = vendor1 >> (8 * i); 705 dst[i + 4] = vendor2 >> (8 * i); 706 dst[i + 8] = vendor3 >> (8 * i); 707 } 708 dst[CPUID_VENDOR_SZ] = '\0'; 709 } 710 711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 716 CPUID_PSE36 | CPUID_FXSR) 717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 721 CPUID_PAE | CPUID_SEP | CPUID_APIC) 722 723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 728 /* partly implemented: 729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH 
(needed for Win64) */ 730 /* missing: 731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 737 CPUID_EXT_RDRAND) 738 /* missing: 739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 743 CPUID_EXT_F16C */ 744 745 #ifdef TARGET_X86_64 746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 747 #else 748 #define TCG_EXT2_X86_64_FEATURES 0 749 #endif 750 751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 754 TCG_EXT2_X86_64_FEATURES) 755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 757 #define TCG_EXT4_FEATURES 0 758 #define TCG_SVM_FEATURES CPUID_SVM_NPT 759 #define TCG_KVM_FEATURES 0 760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 764 CPUID_7_0_EBX_ERMS) 765 /* missing: 766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 768 CPUID_7_0_EBX_RDSEED */ 769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 771 CPUID_7_0_ECX_LA57) 772 #define TCG_7_0_EDX_FEATURES 0 773 #define 
TCG_APM_FEATURES 0 774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 776 /* missing: 777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 778 779 typedef enum FeatureWordType { 780 CPUID_FEATURE_WORD, 781 MSR_FEATURE_WORD, 782 } FeatureWordType; 783 784 typedef struct FeatureWordInfo { 785 FeatureWordType type; 786 /* feature flags names are taken from "Intel Processor Identification and 787 * the CPUID Instruction" and AMD's "CPUID Specification". 788 * In cases of disagreement between feature naming conventions, 789 * aliases may be added. 790 */ 791 const char *feat_names[32]; 792 union { 793 /* If type==CPUID_FEATURE_WORD */ 794 struct { 795 uint32_t eax; /* Input EAX for CPUID */ 796 bool needs_ecx; /* CPUID instruction uses ECX as input */ 797 uint32_t ecx; /* Input ECX value for CPUID */ 798 int reg; /* output register (R_* constant) */ 799 } cpuid; 800 /* If type==MSR_FEATURE_WORD */ 801 struct { 802 uint32_t index; 803 struct { /*CPUID that enumerate this MSR*/ 804 FeatureWord cpuid_class; 805 uint32_t cpuid_flag; 806 } cpuid_dep; 807 } msr; 808 }; 809 uint32_t tcg_features; /* Feature flags supported by TCG */ 810 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ 811 uint32_t migratable_flags; /* Feature flags known to be migratable */ 812 /* Features that shouldn't be auto-enabled by "-cpu host" */ 813 uint32_t no_autoenable_flags; 814 } FeatureWordInfo; 815 816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 817 [FEAT_1_EDX] = { 818 .type = CPUID_FEATURE_WORD, 819 .feat_names = { 820 "fpu", "vme", "de", "pse", 821 "tsc", "msr", "pae", "mce", 822 "cx8", "apic", NULL, "sep", 823 "mtrr", "pge", "mca", "cmov", 824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 825 NULL, "ds" /* Intel dts */, "acpi", "mmx", 826 "fxsr", "sse", "sse2", "ss", 827 "ht" /* Intel htt */, "tm", "ia64", "pbe", 828 }, 829 .cpuid = {.eax = 1, .reg = R_EDX, }, 830 
.tcg_features = TCG_FEATURES, 831 }, 832 [FEAT_1_ECX] = { 833 .type = CPUID_FEATURE_WORD, 834 .feat_names = { 835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 836 "ds-cpl", "vmx", "smx", "est", 837 "tm2", "ssse3", "cid", NULL, 838 "fma", "cx16", "xtpr", "pdcm", 839 NULL, "pcid", "dca", "sse4.1", 840 "sse4.2", "x2apic", "movbe", "popcnt", 841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 842 "avx", "f16c", "rdrand", "hypervisor", 843 }, 844 .cpuid = { .eax = 1, .reg = R_ECX, }, 845 .tcg_features = TCG_EXT_FEATURES, 846 }, 847 /* Feature names that are already defined on feature_name[] but 848 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 849 * names on feat_names below. They are copied automatically 850 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 851 */ 852 [FEAT_8000_0001_EDX] = { 853 .type = CPUID_FEATURE_WORD, 854 .feat_names = { 855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 860 "nx", NULL, "mmxext", NULL /* mmx */, 861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 862 NULL, "lm", "3dnowext", "3dnow", 863 }, 864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 865 .tcg_features = TCG_EXT2_FEATURES, 866 }, 867 [FEAT_8000_0001_ECX] = { 868 .type = CPUID_FEATURE_WORD, 869 .feat_names = { 870 "lahf-lm", "cmp-legacy", "svm", "extapic", 871 "cr8legacy", "abm", "sse4a", "misalignsse", 872 "3dnowprefetch", "osvw", "ibs", "xop", 873 "skinit", "wdt", NULL, "lwp", 874 "fma4", "tce", NULL, "nodeid-msr", 875 NULL, "tbm", "topoext", "perfctr-core", 876 "perfctr-nb", NULL, NULL, NULL, 877 NULL, NULL, NULL, NULL, 878 }, 879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 880 .tcg_features = TCG_EXT3_FEATURES, 881 /* 882 * TOPOEXT is always 
allowed but can't be enabled blindly by 883 * "-cpu host", as it requires consistent cache topology info 884 * to be provided so it doesn't confuse guests. 885 */ 886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 887 }, 888 [FEAT_C000_0001_EDX] = { 889 .type = CPUID_FEATURE_WORD, 890 .feat_names = { 891 NULL, NULL, "xstore", "xstore-en", 892 NULL, NULL, "xcrypt", "xcrypt-en", 893 "ace2", "ace2-en", "phe", "phe-en", 894 "pmm", "pmm-en", NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 NULL, NULL, NULL, NULL, 898 NULL, NULL, NULL, NULL, 899 }, 900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 901 .tcg_features = TCG_EXT4_FEATURES, 902 }, 903 [FEAT_KVM] = { 904 .type = CPUID_FEATURE_WORD, 905 .feat_names = { 906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 909 NULL, NULL, NULL, NULL, 910 NULL, NULL, NULL, NULL, 911 NULL, NULL, NULL, NULL, 912 "kvmclock-stable-bit", NULL, NULL, NULL, 913 NULL, NULL, NULL, NULL, 914 }, 915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 916 .tcg_features = TCG_KVM_FEATURES, 917 }, 918 [FEAT_KVM_HINTS] = { 919 .type = CPUID_FEATURE_WORD, 920 .feat_names = { 921 "kvm-hint-dedicated", NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 NULL, NULL, NULL, NULL, 924 NULL, NULL, NULL, NULL, 925 NULL, NULL, NULL, NULL, 926 NULL, NULL, NULL, NULL, 927 NULL, NULL, NULL, NULL, 928 NULL, NULL, NULL, NULL, 929 }, 930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 931 .tcg_features = TCG_KVM_FEATURES, 932 /* 933 * KVM hints aren't auto-enabled by -cpu host, they need to be 934 * explicitly enabled in the command-line. 935 */ 936 .no_autoenable_flags = ~0U, 937 }, 938 /* 939 * .feat_names are commented out for Hyper-V enlightenments because we 940 * don't want to have two different ways for enabling them on QEMU command 941 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) 
require 942 * enabling several feature bits simultaneously, exposing these bits 943 * individually may just confuse guests. 944 */ 945 [FEAT_HYPERV_EAX] = { 946 .type = CPUID_FEATURE_WORD, 947 .feat_names = { 948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 955 NULL, NULL, 956 NULL, NULL, NULL, NULL, 957 NULL, NULL, NULL, NULL, 958 NULL, NULL, NULL, NULL, 959 NULL, NULL, NULL, NULL, 960 }, 961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 962 }, 963 [FEAT_HYPERV_EBX] = { 964 .type = CPUID_FEATURE_WORD, 965 .feat_names = { 966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 968 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 969 NULL /* hv_create_port */, NULL /* hv_connect_port */, 970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 972 NULL, NULL, 973 NULL, NULL, NULL, NULL, 974 NULL, NULL, NULL, NULL, 975 NULL, NULL, NULL, NULL, 976 NULL, NULL, NULL, NULL, 977 }, 978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 979 }, 980 [FEAT_HYPERV_EDX] = { 981 .type = CPUID_FEATURE_WORD, 982 .feat_names = { 983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 986 NULL, NULL, 987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 988 NULL, NULL, NULL, NULL, 989 NULL, NULL, NULL, NULL, 990 NULL, NULL, 
NULL, NULL, 991 NULL, NULL, NULL, NULL, 992 NULL, NULL, NULL, NULL, 993 }, 994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 995 }, 996 [FEAT_HV_RECOMM_EAX] = { 997 .type = CPUID_FEATURE_WORD, 998 .feat_names = { 999 NULL /* hv_recommend_pv_as_switch */, 1000 NULL /* hv_recommend_pv_tlbflush_local */, 1001 NULL /* hv_recommend_pv_tlbflush_remote */, 1002 NULL /* hv_recommend_msr_apic_access */, 1003 NULL /* hv_recommend_msr_reset */, 1004 NULL /* hv_recommend_relaxed_timing */, 1005 NULL /* hv_recommend_dma_remapping */, 1006 NULL /* hv_recommend_int_remapping */, 1007 NULL /* hv_recommend_x2apic_msrs */, 1008 NULL /* hv_recommend_autoeoi_deprecation */, 1009 NULL /* hv_recommend_pv_ipi */, 1010 NULL /* hv_recommend_ex_hypercalls */, 1011 NULL /* hv_hypervisor_is_nested */, 1012 NULL /* hv_recommend_int_mbec */, 1013 NULL /* hv_recommend_evmcs */, 1014 NULL, 1015 NULL, NULL, NULL, NULL, 1016 NULL, NULL, NULL, NULL, 1017 NULL, NULL, NULL, NULL, 1018 NULL, NULL, NULL, NULL, 1019 }, 1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 1021 }, 1022 [FEAT_HV_NESTED_EAX] = { 1023 .type = CPUID_FEATURE_WORD, 1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 1025 }, 1026 [FEAT_SVM] = { 1027 .type = CPUID_FEATURE_WORD, 1028 .feat_names = { 1029 "npt", "lbrv", "svm-lock", "nrip-save", 1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 1031 NULL, NULL, "pause-filter", NULL, 1032 "pfthreshold", NULL, NULL, NULL, 1033 NULL, NULL, NULL, NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, NULL, NULL, NULL, 1036 NULL, NULL, NULL, NULL, 1037 }, 1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 1039 .tcg_features = TCG_SVM_FEATURES, 1040 }, 1041 [FEAT_7_0_EBX] = { 1042 .type = CPUID_FEATURE_WORD, 1043 .feat_names = { 1044 "fsgsbase", "tsc-adjust", NULL, "bmi1", 1045 "hle", "avx2", NULL, "smep", 1046 "bmi2", "erms", "invpcid", "rtm", 1047 NULL, NULL, "mpx", NULL, 1048 "avx512f", "avx512dq", "rdseed", "adx", 1049 "smap", "avx512ifma", "pcommit", "clflushopt", 1050 "clwb", 
"intel-pt", "avx512pf", "avx512er", 1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 1052 }, 1053 .cpuid = { 1054 .eax = 7, 1055 .needs_ecx = true, .ecx = 0, 1056 .reg = R_EBX, 1057 }, 1058 .tcg_features = TCG_7_0_EBX_FEATURES, 1059 }, 1060 [FEAT_7_0_ECX] = { 1061 .type = CPUID_FEATURE_WORD, 1062 .feat_names = { 1063 NULL, "avx512vbmi", "umip", "pku", 1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL, 1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 1067 "la57", NULL, NULL, NULL, 1068 NULL, NULL, "rdpid", NULL, 1069 NULL, "cldemote", NULL, "movdiri", 1070 "movdir64b", NULL, NULL, NULL, 1071 }, 1072 .cpuid = { 1073 .eax = 7, 1074 .needs_ecx = true, .ecx = 0, 1075 .reg = R_ECX, 1076 }, 1077 .tcg_features = TCG_7_0_ECX_FEATURES, 1078 }, 1079 [FEAT_7_0_EDX] = { 1080 .type = CPUID_FEATURE_WORD, 1081 .feat_names = { 1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 1083 NULL, NULL, NULL, NULL, 1084 NULL, NULL, "md-clear", NULL, 1085 NULL, NULL, NULL, NULL, 1086 NULL, NULL, NULL, NULL, 1087 NULL, NULL, NULL, NULL, 1088 NULL, NULL, "spec-ctrl", "stibp", 1089 NULL, "arch-capabilities", "core-capability", "ssbd", 1090 }, 1091 .cpuid = { 1092 .eax = 7, 1093 .needs_ecx = true, .ecx = 0, 1094 .reg = R_EDX, 1095 }, 1096 .tcg_features = TCG_7_0_EDX_FEATURES, 1097 }, 1098 [FEAT_8000_0007_EDX] = { 1099 .type = CPUID_FEATURE_WORD, 1100 .feat_names = { 1101 NULL, NULL, NULL, NULL, 1102 NULL, NULL, NULL, NULL, 1103 "invtsc", NULL, NULL, NULL, 1104 NULL, NULL, NULL, NULL, 1105 NULL, NULL, NULL, NULL, 1106 NULL, NULL, NULL, NULL, 1107 NULL, NULL, NULL, NULL, 1108 NULL, NULL, NULL, NULL, 1109 }, 1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1111 .tcg_features = TCG_APM_FEATURES, 1112 .unmigratable_flags = CPUID_APM_INVTSC, 1113 }, 1114 [FEAT_8000_0008_EBX] = { 1115 .type = CPUID_FEATURE_WORD, 1116 .feat_names = { 1117 NULL, NULL, NULL, NULL, 1118 NULL, NULL, NULL, NULL, 1119 NULL, "wbnoinvd", NULL, NULL, 1120 "ibpb", NULL, 
NULL, NULL, 1121 NULL, NULL, NULL, NULL, 1122 NULL, NULL, NULL, NULL, 1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1124 NULL, NULL, NULL, NULL, 1125 }, 1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1127 .tcg_features = 0, 1128 .unmigratable_flags = 0, 1129 }, 1130 [FEAT_XSAVE] = { 1131 .type = CPUID_FEATURE_WORD, 1132 .feat_names = { 1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 NULL, NULL, NULL, NULL, 1138 NULL, NULL, NULL, NULL, 1139 NULL, NULL, NULL, NULL, 1140 NULL, NULL, NULL, NULL, 1141 }, 1142 .cpuid = { 1143 .eax = 0xd, 1144 .needs_ecx = true, .ecx = 1, 1145 .reg = R_EAX, 1146 }, 1147 .tcg_features = TCG_XSAVE_FEATURES, 1148 }, 1149 [FEAT_6_EAX] = { 1150 .type = CPUID_FEATURE_WORD, 1151 .feat_names = { 1152 NULL, NULL, "arat", NULL, 1153 NULL, NULL, NULL, NULL, 1154 NULL, NULL, NULL, NULL, 1155 NULL, NULL, NULL, NULL, 1156 NULL, NULL, NULL, NULL, 1157 NULL, NULL, NULL, NULL, 1158 NULL, NULL, NULL, NULL, 1159 NULL, NULL, NULL, NULL, 1160 }, 1161 .cpuid = { .eax = 6, .reg = R_EAX, }, 1162 .tcg_features = TCG_6_EAX_FEATURES, 1163 }, 1164 [FEAT_XSAVE_COMP_LO] = { 1165 .type = CPUID_FEATURE_WORD, 1166 .cpuid = { 1167 .eax = 0xD, 1168 .needs_ecx = true, .ecx = 0, 1169 .reg = R_EAX, 1170 }, 1171 .tcg_features = ~0U, 1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1175 XSTATE_PKRU_MASK, 1176 }, 1177 [FEAT_XSAVE_COMP_HI] = { 1178 .type = CPUID_FEATURE_WORD, 1179 .cpuid = { 1180 .eax = 0xD, 1181 .needs_ecx = true, .ecx = 0, 1182 .reg = R_EDX, 1183 }, 1184 .tcg_features = ~0U, 1185 }, 1186 /*Below are MSR exposed features*/ 1187 [FEAT_ARCH_CAPABILITIES] = { 1188 .type = MSR_FEATURE_WORD, 1189 .feat_names = { 1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1191 "ssb-no", "mds-no", NULL, NULL, 1192 NULL, 
NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
            /* Only meaningful when CPUID.07H:EDX advertises ARCH_CAPABILITIES */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_ARCH_CAPABILITIES
            }
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
            /* Only meaningful when CPUID.07H:EDX advertises CORE_CAPABILITY */
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_CORE_CAPABILITY,
            },
        },
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/*
 * Map each R_* register index to its printable name and QAPI enum value.
 * Indexed by the R_EAX..R_EDI constants.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/*
 * One XSAVE state component: the CPUID feature word/bits that enable it,
 * plus its offset and size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* XSAVE components, indexed by XSTATE_*_BIT component number */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits =
CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

/*
 * Size in bytes of the XSAVE area needed for the state components
 * selected by @mask (a bitmask of XSTATE_*_BIT bits): the maximum of
 * offset + size over all enabled components.
 */
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    /* NOTE(review): narrows uint64_t to uint32_t on return; assumes
     * component offsets/sizes stay below 4 GiB — appears safe for the
     * table above, confirm if new components are added. */
    return ret;
}

/* True when the active accelerator (KVM or HVF) derives CPUID from the host */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

/* Combine the FEAT_XSAVE_COMP_HI/LO feature words into one 64-bit mask */
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) <<
32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

/* Return the name of 32-bit register @reg, or NULL if @reg is out of range */
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

/*
 * Execute CPUID on the host CPU for leaf @function, subleaf @count.
 * Each output pointer may be NULL if the caller does not need that register.
 * Aborts when built for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* Outputs are stored through %esi; pusha/popa restores all GPRs
     * around the CPUID so no output register constraints are needed. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

/*
 * Read the host CPU's vendor string and family/model/stepping via CPUID.
 * @vendor must have room for CPUID_VENDOR_SZ + 1 bytes; @family, @model
 * and @stepping may each be NULL.  Family/model combine the base and
 * extended CPUID.01H:EAX fields.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) +
((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

/* Look up the QOM class registered for CPU model @cpu_model (NULL if none) */
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

/* Recover the model name by stripping X86_CPU_TYPE_SUFFIX from the class
 * name.  Caller must g_free() the returned string. */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

/* A single "property = value" assignment applied by a CPU model version */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* One versioned variant of a CPU model: its number, optional alias name,
 * and the property overrides it applies on top of the base definition. */
typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
*/
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version
 * Returns "<name>-v<version>"; caller must g_free() the result.
 */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

/* Return @def's version list, falling back to a default "v1 only" list */
static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    return def->versions ?: default_version_list;
}

/* Cache topology referenced via X86CPUDefinition::cache_info by the
 * EPYC CPU model definitions below. */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
1539 .associativity = 16, 1540 .partitions = 1, 1541 .sets = 8192, 1542 .lines_per_tag = 1, 1543 .self_init = true, 1544 .inclusive = true, 1545 .complex_indexing = true, 1546 }, 1547 }; 1548 1549 static X86CPUDefinition builtin_x86_defs[] = { 1550 { 1551 .name = "qemu64", 1552 .level = 0xd, 1553 .vendor = CPUID_VENDOR_AMD, 1554 .family = 6, 1555 .model = 6, 1556 .stepping = 3, 1557 .features[FEAT_1_EDX] = 1558 PPRO_FEATURES | 1559 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1560 CPUID_PSE36, 1561 .features[FEAT_1_ECX] = 1562 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1563 .features[FEAT_8000_0001_EDX] = 1564 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1565 .features[FEAT_8000_0001_ECX] = 1566 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1567 .xlevel = 0x8000000A, 1568 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1569 }, 1570 { 1571 .name = "phenom", 1572 .level = 5, 1573 .vendor = CPUID_VENDOR_AMD, 1574 .family = 16, 1575 .model = 2, 1576 .stepping = 3, 1577 /* Missing: CPUID_HT */ 1578 .features[FEAT_1_EDX] = 1579 PPRO_FEATURES | 1580 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1581 CPUID_PSE36 | CPUID_VME, 1582 .features[FEAT_1_ECX] = 1583 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1584 CPUID_EXT_POPCNT, 1585 .features[FEAT_8000_0001_EDX] = 1586 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1587 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1588 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1589 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1590 CPUID_EXT3_CR8LEG, 1591 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1592 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1593 .features[FEAT_8000_0001_ECX] = 1594 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1595 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1596 /* Missing: CPUID_SVM_LBRV */ 1597 .features[FEAT_SVM] = 1598 CPUID_SVM_NPT, 1599 .xlevel = 0x8000001A, 1600 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1601 }, 1602 { 1603 .name = "core2duo", 1604 .level = 10, 1605 .vendor = 
CPUID_VENDOR_INTEL, 1606 .family = 6, 1607 .model = 15, 1608 .stepping = 11, 1609 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1610 .features[FEAT_1_EDX] = 1611 PPRO_FEATURES | 1612 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1613 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1614 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1615 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1616 .features[FEAT_1_ECX] = 1617 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1618 CPUID_EXT_CX16, 1619 .features[FEAT_8000_0001_EDX] = 1620 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1621 .features[FEAT_8000_0001_ECX] = 1622 CPUID_EXT3_LAHF_LM, 1623 .xlevel = 0x80000008, 1624 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1625 }, 1626 { 1627 .name = "kvm64", 1628 .level = 0xd, 1629 .vendor = CPUID_VENDOR_INTEL, 1630 .family = 15, 1631 .model = 6, 1632 .stepping = 1, 1633 /* Missing: CPUID_HT */ 1634 .features[FEAT_1_EDX] = 1635 PPRO_FEATURES | CPUID_VME | 1636 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1637 CPUID_PSE36, 1638 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1639 .features[FEAT_1_ECX] = 1640 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1641 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1642 .features[FEAT_8000_0001_EDX] = 1643 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1644 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1645 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1646 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1647 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1648 .features[FEAT_8000_0001_ECX] = 1649 0, 1650 .xlevel = 0x80000008, 1651 .model_id = "Common KVM processor" 1652 }, 1653 { 1654 .name = "qemu32", 1655 .level = 4, 1656 .vendor = CPUID_VENDOR_INTEL, 1657 .family = 6, 1658 .model = 6, 1659 .stepping = 3, 1660 .features[FEAT_1_EDX] = 1661 PPRO_FEATURES, 1662 .features[FEAT_1_ECX] = 1663 CPUID_EXT_SSE3, 1664 .xlevel = 0x80000004, 1665 
.model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1666 }, 1667 { 1668 .name = "kvm32", 1669 .level = 5, 1670 .vendor = CPUID_VENDOR_INTEL, 1671 .family = 15, 1672 .model = 6, 1673 .stepping = 1, 1674 .features[FEAT_1_EDX] = 1675 PPRO_FEATURES | CPUID_VME | 1676 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1677 .features[FEAT_1_ECX] = 1678 CPUID_EXT_SSE3, 1679 .features[FEAT_8000_0001_ECX] = 1680 0, 1681 .xlevel = 0x80000008, 1682 .model_id = "Common 32-bit KVM processor" 1683 }, 1684 { 1685 .name = "coreduo", 1686 .level = 10, 1687 .vendor = CPUID_VENDOR_INTEL, 1688 .family = 6, 1689 .model = 14, 1690 .stepping = 8, 1691 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1692 .features[FEAT_1_EDX] = 1693 PPRO_FEATURES | CPUID_VME | 1694 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1695 CPUID_SS, 1696 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1697 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1698 .features[FEAT_1_ECX] = 1699 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1700 .features[FEAT_8000_0001_EDX] = 1701 CPUID_EXT2_NX, 1702 .xlevel = 0x80000008, 1703 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 1704 }, 1705 { 1706 .name = "486", 1707 .level = 1, 1708 .vendor = CPUID_VENDOR_INTEL, 1709 .family = 4, 1710 .model = 8, 1711 .stepping = 0, 1712 .features[FEAT_1_EDX] = 1713 I486_FEATURES, 1714 .xlevel = 0, 1715 .model_id = "", 1716 }, 1717 { 1718 .name = "pentium", 1719 .level = 1, 1720 .vendor = CPUID_VENDOR_INTEL, 1721 .family = 5, 1722 .model = 4, 1723 .stepping = 3, 1724 .features[FEAT_1_EDX] = 1725 PENTIUM_FEATURES, 1726 .xlevel = 0, 1727 .model_id = "", 1728 }, 1729 { 1730 .name = "pentium2", 1731 .level = 2, 1732 .vendor = CPUID_VENDOR_INTEL, 1733 .family = 6, 1734 .model = 5, 1735 .stepping = 2, 1736 .features[FEAT_1_EDX] = 1737 PENTIUM2_FEATURES, 1738 .xlevel = 0, 1739 .model_id = "", 1740 }, 1741 { 1742 .name = "pentium3", 1743 .level = 3, 1744 .vendor = CPUID_VENDOR_INTEL, 1745 .family = 6, 1746 .model = 7, 1747 .stepping 
= 3, 1748 .features[FEAT_1_EDX] = 1749 PENTIUM3_FEATURES, 1750 .xlevel = 0, 1751 .model_id = "", 1752 }, 1753 { 1754 .name = "athlon", 1755 .level = 2, 1756 .vendor = CPUID_VENDOR_AMD, 1757 .family = 6, 1758 .model = 2, 1759 .stepping = 3, 1760 .features[FEAT_1_EDX] = 1761 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 1762 CPUID_MCA, 1763 .features[FEAT_8000_0001_EDX] = 1764 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 1765 .xlevel = 0x80000008, 1766 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1767 }, 1768 { 1769 .name = "n270", 1770 .level = 10, 1771 .vendor = CPUID_VENDOR_INTEL, 1772 .family = 6, 1773 .model = 28, 1774 .stepping = 2, 1775 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1776 .features[FEAT_1_EDX] = 1777 PPRO_FEATURES | 1778 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 1779 CPUID_ACPI | CPUID_SS, 1780 /* Some CPUs got no CPUID_SEP */ 1781 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 1782 * CPUID_EXT_XTPR */ 1783 .features[FEAT_1_ECX] = 1784 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1785 CPUID_EXT_MOVBE, 1786 .features[FEAT_8000_0001_EDX] = 1787 CPUID_EXT2_NX, 1788 .features[FEAT_8000_0001_ECX] = 1789 CPUID_EXT3_LAHF_LM, 1790 .xlevel = 0x80000008, 1791 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 1792 }, 1793 { 1794 .name = "Conroe", 1795 .level = 10, 1796 .vendor = CPUID_VENDOR_INTEL, 1797 .family = 6, 1798 .model = 15, 1799 .stepping = 3, 1800 .features[FEAT_1_EDX] = 1801 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1802 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1803 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1804 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1805 CPUID_DE | CPUID_FP87, 1806 .features[FEAT_1_ECX] = 1807 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1808 .features[FEAT_8000_0001_EDX] = 1809 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1810 .features[FEAT_8000_0001_ECX] = 1811 
CPUID_EXT3_LAHF_LM, 1812 .xlevel = 0x80000008, 1813 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 1814 }, 1815 { 1816 .name = "Penryn", 1817 .level = 10, 1818 .vendor = CPUID_VENDOR_INTEL, 1819 .family = 6, 1820 .model = 23, 1821 .stepping = 3, 1822 .features[FEAT_1_EDX] = 1823 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1824 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1825 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1826 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1827 CPUID_DE | CPUID_FP87, 1828 .features[FEAT_1_ECX] = 1829 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1830 CPUID_EXT_SSE3, 1831 .features[FEAT_8000_0001_EDX] = 1832 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 1833 .features[FEAT_8000_0001_ECX] = 1834 CPUID_EXT3_LAHF_LM, 1835 .xlevel = 0x80000008, 1836 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 1837 }, 1838 { 1839 .name = "Nehalem", 1840 .level = 11, 1841 .vendor = CPUID_VENDOR_INTEL, 1842 .family = 6, 1843 .model = 26, 1844 .stepping = 3, 1845 .features[FEAT_1_EDX] = 1846 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1847 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1848 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1849 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1850 CPUID_DE | CPUID_FP87, 1851 .features[FEAT_1_ECX] = 1852 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1853 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 1854 .features[FEAT_8000_0001_EDX] = 1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1856 .features[FEAT_8000_0001_ECX] = 1857 CPUID_EXT3_LAHF_LM, 1858 .xlevel = 0x80000008, 1859 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 1860 .versions = (X86CPUVersionDefinition[]) { 1861 { .version = 1 }, 1862 { 1863 .version = 2, 1864 .alias = "Nehalem-IBRS", 1865 .props = (PropValue[]) { 1866 { "spec-ctrl", "on" }, 1867 
{ "model-id", 1868 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 1869 { /* end of list */ } 1870 } 1871 }, 1872 { /* end of list */ } 1873 } 1874 }, 1875 { 1876 .name = "Westmere", 1877 .level = 11, 1878 .vendor = CPUID_VENDOR_INTEL, 1879 .family = 6, 1880 .model = 44, 1881 .stepping = 1, 1882 .features[FEAT_1_EDX] = 1883 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1884 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1885 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1886 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1887 CPUID_DE | CPUID_FP87, 1888 .features[FEAT_1_ECX] = 1889 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 1890 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 1891 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 1892 .features[FEAT_8000_0001_EDX] = 1893 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1894 .features[FEAT_8000_0001_ECX] = 1895 CPUID_EXT3_LAHF_LM, 1896 .features[FEAT_6_EAX] = 1897 CPUID_6_EAX_ARAT, 1898 .xlevel = 0x80000008, 1899 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 1900 .versions = (X86CPUVersionDefinition[]) { 1901 { .version = 1 }, 1902 { 1903 .version = 2, 1904 .alias = "Westmere-IBRS", 1905 .props = (PropValue[]) { 1906 { "spec-ctrl", "on" }, 1907 { "model-id", 1908 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 1909 { /* end of list */ } 1910 } 1911 }, 1912 { /* end of list */ } 1913 } 1914 }, 1915 { 1916 .name = "SandyBridge", 1917 .level = 0xd, 1918 .vendor = CPUID_VENDOR_INTEL, 1919 .family = 6, 1920 .model = 42, 1921 .stepping = 1, 1922 .features[FEAT_1_EDX] = 1923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1927 CPUID_DE | CPUID_FP87, 1928 .features[FEAT_1_ECX] = 1929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | 
CPUID_EXT_AES | 1930 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1931 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1932 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1933 CPUID_EXT_SSE3, 1934 .features[FEAT_8000_0001_EDX] = 1935 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1936 CPUID_EXT2_SYSCALL, 1937 .features[FEAT_8000_0001_ECX] = 1938 CPUID_EXT3_LAHF_LM, 1939 .features[FEAT_XSAVE] = 1940 CPUID_XSAVE_XSAVEOPT, 1941 .features[FEAT_6_EAX] = 1942 CPUID_6_EAX_ARAT, 1943 .xlevel = 0x80000008, 1944 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 1945 .versions = (X86CPUVersionDefinition[]) { 1946 { .version = 1 }, 1947 { 1948 .version = 2, 1949 .alias = "SandyBridge-IBRS", 1950 .props = (PropValue[]) { 1951 { "spec-ctrl", "on" }, 1952 { "model-id", 1953 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 1954 { /* end of list */ } 1955 } 1956 }, 1957 { /* end of list */ } 1958 } 1959 }, 1960 { 1961 .name = "IvyBridge", 1962 .level = 0xd, 1963 .vendor = CPUID_VENDOR_INTEL, 1964 .family = 6, 1965 .model = 58, 1966 .stepping = 9, 1967 .features[FEAT_1_EDX] = 1968 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 1969 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 1970 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 1971 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 1972 CPUID_DE | CPUID_FP87, 1973 .features[FEAT_1_ECX] = 1974 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 1975 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 1976 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 1977 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 1978 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 1979 .features[FEAT_7_0_EBX] = 1980 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 1981 CPUID_7_0_EBX_ERMS, 1982 .features[FEAT_8000_0001_EDX] = 1983 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 1984 CPUID_EXT2_SYSCALL, 1985 .features[FEAT_8000_0001_ECX] = 1986 
CPUID_EXT3_LAHF_LM, 1987 .features[FEAT_XSAVE] = 1988 CPUID_XSAVE_XSAVEOPT, 1989 .features[FEAT_6_EAX] = 1990 CPUID_6_EAX_ARAT, 1991 .xlevel = 0x80000008, 1992 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 1993 .versions = (X86CPUVersionDefinition[]) { 1994 { .version = 1 }, 1995 { 1996 .version = 2, 1997 .alias = "IvyBridge-IBRS", 1998 .props = (PropValue[]) { 1999 { "spec-ctrl", "on" }, 2000 { "model-id", 2001 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2002 { /* end of list */ } 2003 } 2004 }, 2005 { /* end of list */ } 2006 } 2007 }, 2008 { 2009 .name = "Haswell", 2010 .level = 0xd, 2011 .vendor = CPUID_VENDOR_INTEL, 2012 .family = 6, 2013 .model = 60, 2014 .stepping = 4, 2015 .features[FEAT_1_EDX] = 2016 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2017 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2018 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2019 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2020 CPUID_DE | CPUID_FP87, 2021 .features[FEAT_1_ECX] = 2022 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2023 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2024 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2025 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2026 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2027 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2028 .features[FEAT_8000_0001_EDX] = 2029 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2030 CPUID_EXT2_SYSCALL, 2031 .features[FEAT_8000_0001_ECX] = 2032 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2033 .features[FEAT_7_0_EBX] = 2034 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2035 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2036 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2037 CPUID_7_0_EBX_RTM, 2038 .features[FEAT_XSAVE] = 2039 CPUID_XSAVE_XSAVEOPT, 2040 .features[FEAT_6_EAX] = 2041 CPUID_6_EAX_ARAT, 2042 .xlevel = 0x80000008, 2043 .model_id = "Intel Core 
Processor (Haswell)", 2044 .versions = (X86CPUVersionDefinition[]) { 2045 { .version = 1 }, 2046 { 2047 .version = 2, 2048 .alias = "Haswell-noTSX", 2049 .props = (PropValue[]) { 2050 { "hle", "off" }, 2051 { "rtm", "off" }, 2052 { "stepping", "1" }, 2053 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2054 { /* end of list */ } 2055 }, 2056 }, 2057 { 2058 .version = 3, 2059 .alias = "Haswell-IBRS", 2060 .props = (PropValue[]) { 2061 /* Restore TSX features removed by -v2 above */ 2062 { "hle", "on" }, 2063 { "rtm", "on" }, 2064 /* 2065 * Haswell and Haswell-IBRS had stepping=4 in 2066 * QEMU 4.0 and older 2067 */ 2068 { "stepping", "4" }, 2069 { "spec-ctrl", "on" }, 2070 { "model-id", 2071 "Intel Core Processor (Haswell, IBRS)" }, 2072 { /* end of list */ } 2073 } 2074 }, 2075 { 2076 .version = 4, 2077 .alias = "Haswell-noTSX-IBRS", 2078 .props = (PropValue[]) { 2079 { "hle", "off" }, 2080 { "rtm", "off" }, 2081 /* spec-ctrl was already enabled by -v3 above */ 2082 { "stepping", "1" }, 2083 { "model-id", 2084 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2085 { /* end of list */ } 2086 } 2087 }, 2088 { /* end of list */ } 2089 } 2090 }, 2091 { 2092 .name = "Broadwell", 2093 .level = 0xd, 2094 .vendor = CPUID_VENDOR_INTEL, 2095 .family = 6, 2096 .model = 61, 2097 .stepping = 2, 2098 .features[FEAT_1_EDX] = 2099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2103 CPUID_DE | CPUID_FP87, 2104 .features[FEAT_1_ECX] = 2105 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2106 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2107 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2108 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2109 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2110 CPUID_EXT_PCID | CPUID_EXT_F16C | 
CPUID_EXT_RDRAND, 2111 .features[FEAT_8000_0001_EDX] = 2112 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2113 CPUID_EXT2_SYSCALL, 2114 .features[FEAT_8000_0001_ECX] = 2115 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2116 .features[FEAT_7_0_EBX] = 2117 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2118 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2119 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2120 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2121 CPUID_7_0_EBX_SMAP, 2122 .features[FEAT_XSAVE] = 2123 CPUID_XSAVE_XSAVEOPT, 2124 .features[FEAT_6_EAX] = 2125 CPUID_6_EAX_ARAT, 2126 .xlevel = 0x80000008, 2127 .model_id = "Intel Core Processor (Broadwell)", 2128 .versions = (X86CPUVersionDefinition[]) { 2129 { .version = 1 }, 2130 { 2131 .version = 2, 2132 .alias = "Broadwell-noTSX", 2133 .props = (PropValue[]) { 2134 { "hle", "off" }, 2135 { "rtm", "off" }, 2136 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2137 { /* end of list */ } 2138 }, 2139 }, 2140 { 2141 .version = 3, 2142 .alias = "Broadwell-IBRS", 2143 .props = (PropValue[]) { 2144 /* Restore TSX features removed by -v2 above */ 2145 { "hle", "on" }, 2146 { "rtm", "on" }, 2147 { "spec-ctrl", "on" }, 2148 { "model-id", 2149 "Intel Core Processor (Broadwell, IBRS)" }, 2150 { /* end of list */ } 2151 } 2152 }, 2153 { 2154 .version = 4, 2155 .alias = "Broadwell-noTSX-IBRS", 2156 .props = (PropValue[]) { 2157 { "hle", "off" }, 2158 { "rtm", "off" }, 2159 /* spec-ctrl was already enabled by -v3 above */ 2160 { "model-id", 2161 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2162 { /* end of list */ } 2163 } 2164 }, 2165 { /* end of list */ } 2166 } 2167 }, 2168 { 2169 .name = "Skylake-Client", 2170 .level = 0xd, 2171 .vendor = CPUID_VENDOR_INTEL, 2172 .family = 6, 2173 .model = 94, 2174 .stepping = 3, 2175 .features[FEAT_1_EDX] = 2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2177 CPUID_CLFLUSH | 
CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2180 CPUID_DE | CPUID_FP87, 2181 .features[FEAT_1_ECX] = 2182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2183 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2185 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2186 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2187 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2188 .features[FEAT_8000_0001_EDX] = 2189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2190 CPUID_EXT2_SYSCALL, 2191 .features[FEAT_8000_0001_ECX] = 2192 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2193 .features[FEAT_7_0_EBX] = 2194 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2195 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2196 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2197 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2198 CPUID_7_0_EBX_SMAP, 2199 /* Missing: XSAVES (not supported by some Linux versions, 2200 * including v4.1 to v4.12). 2201 * KVM doesn't yet expose any XSAVES state save component, 2202 * and the only one defined in Skylake (processor tracing) 2203 * probably will block migration anyway. 
2204 */ 2205 .features[FEAT_XSAVE] = 2206 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2207 CPUID_XSAVE_XGETBV1, 2208 .features[FEAT_6_EAX] = 2209 CPUID_6_EAX_ARAT, 2210 .xlevel = 0x80000008, 2211 .model_id = "Intel Core Processor (Skylake)", 2212 .versions = (X86CPUVersionDefinition[]) { 2213 { .version = 1 }, 2214 { 2215 .version = 2, 2216 .alias = "Skylake-Client-IBRS", 2217 .props = (PropValue[]) { 2218 { "spec-ctrl", "on" }, 2219 { "model-id", 2220 "Intel Core Processor (Skylake, IBRS)" }, 2221 { /* end of list */ } 2222 } 2223 }, 2224 { /* end of list */ } 2225 } 2226 }, 2227 { 2228 .name = "Skylake-Server", 2229 .level = 0xd, 2230 .vendor = CPUID_VENDOR_INTEL, 2231 .family = 6, 2232 .model = 85, 2233 .stepping = 4, 2234 .features[FEAT_1_EDX] = 2235 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2236 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2237 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2238 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2239 CPUID_DE | CPUID_FP87, 2240 .features[FEAT_1_ECX] = 2241 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2242 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2243 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2244 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2245 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2246 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2247 .features[FEAT_8000_0001_EDX] = 2248 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2249 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2250 .features[FEAT_8000_0001_ECX] = 2251 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2252 .features[FEAT_7_0_EBX] = 2253 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2254 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2255 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2256 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2257 CPUID_7_0_EBX_SMAP | 
CPUID_7_0_EBX_CLWB | 2258 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2259 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2260 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2261 .features[FEAT_7_0_ECX] = 2262 CPUID_7_0_ECX_PKU, 2263 /* Missing: XSAVES (not supported by some Linux versions, 2264 * including v4.1 to v4.12). 2265 * KVM doesn't yet expose any XSAVES state save component, 2266 * and the only one defined in Skylake (processor tracing) 2267 * probably will block migration anyway. 2268 */ 2269 .features[FEAT_XSAVE] = 2270 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2271 CPUID_XSAVE_XGETBV1, 2272 .features[FEAT_6_EAX] = 2273 CPUID_6_EAX_ARAT, 2274 .xlevel = 0x80000008, 2275 .model_id = "Intel Xeon Processor (Skylake)", 2276 .versions = (X86CPUVersionDefinition[]) { 2277 { .version = 1 }, 2278 { 2279 .version = 2, 2280 .alias = "Skylake-Server-IBRS", 2281 .props = (PropValue[]) { 2282 /* clflushopt was not added to Skylake-Server-IBRS */ 2283 /* TODO: add -v3 including clflushopt */ 2284 { "clflushopt", "off" }, 2285 { "spec-ctrl", "on" }, 2286 { "model-id", 2287 "Intel Xeon Processor (Skylake, IBRS)" }, 2288 { /* end of list */ } 2289 } 2290 }, 2291 { /* end of list */ } 2292 } 2293 }, 2294 { 2295 .name = "Cascadelake-Server", 2296 .level = 0xd, 2297 .vendor = CPUID_VENDOR_INTEL, 2298 .family = 6, 2299 .model = 85, 2300 .stepping = 6, 2301 .features[FEAT_1_EDX] = 2302 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2303 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2304 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2305 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2306 CPUID_DE | CPUID_FP87, 2307 .features[FEAT_1_ECX] = 2308 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2309 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2310 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2311 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2312 CPUID_EXT_TSC_DEADLINE_TIMER | 
CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2313 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2314 .features[FEAT_8000_0001_EDX] = 2315 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2316 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2317 .features[FEAT_8000_0001_ECX] = 2318 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2319 .features[FEAT_7_0_EBX] = 2320 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2321 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2322 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2323 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2324 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2325 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2326 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2327 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2328 .features[FEAT_7_0_ECX] = 2329 CPUID_7_0_ECX_PKU | 2330 CPUID_7_0_ECX_AVX512VNNI, 2331 .features[FEAT_7_0_EDX] = 2332 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2333 /* Missing: XSAVES (not supported by some Linux versions, 2334 * including v4.1 to v4.12). 2335 * KVM doesn't yet expose any XSAVES state save component, 2336 * and the only one defined in Skylake (processor tracing) 2337 * probably will block migration anyway. 
2338 */ 2339 .features[FEAT_XSAVE] = 2340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2341 CPUID_XSAVE_XGETBV1, 2342 .features[FEAT_6_EAX] = 2343 CPUID_6_EAX_ARAT, 2344 .xlevel = 0x80000008, 2345 .model_id = "Intel Xeon Processor (Cascadelake)", 2346 .versions = (X86CPUVersionDefinition[]) { 2347 { .version = 1 }, 2348 { .version = 2, 2349 .props = (PropValue[]) { 2350 { "arch-capabilities", "on" }, 2351 { "rdctl-no", "on" }, 2352 { "ibrs-all", "on" }, 2353 { "skip-l1dfl-vmentry", "on" }, 2354 { "mds-no", "on" }, 2355 { /* end of list */ } 2356 }, 2357 }, 2358 { /* end of list */ } 2359 } 2360 }, 2361 { 2362 .name = "Icelake-Client", 2363 .level = 0xd, 2364 .vendor = CPUID_VENDOR_INTEL, 2365 .family = 6, 2366 .model = 126, 2367 .stepping = 0, 2368 .features[FEAT_1_EDX] = 2369 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2370 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2371 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2372 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2373 CPUID_DE | CPUID_FP87, 2374 .features[FEAT_1_ECX] = 2375 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2376 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2377 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2378 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2379 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2380 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2381 .features[FEAT_8000_0001_EDX] = 2382 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2383 CPUID_EXT2_SYSCALL, 2384 .features[FEAT_8000_0001_ECX] = 2385 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2386 .features[FEAT_8000_0008_EBX] = 2387 CPUID_8000_0008_EBX_WBNOINVD, 2388 .features[FEAT_7_0_EBX] = 2389 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2390 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2391 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2392 CPUID_7_0_EBX_RTM | 
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2393 CPUID_7_0_EBX_SMAP, 2394 .features[FEAT_7_0_ECX] = 2395 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2396 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2397 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2398 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2399 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2400 .features[FEAT_7_0_EDX] = 2401 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2402 /* Missing: XSAVES (not supported by some Linux versions, 2403 * including v4.1 to v4.12). 2404 * KVM doesn't yet expose any XSAVES state save component, 2405 * and the only one defined in Skylake (processor tracing) 2406 * probably will block migration anyway. 2407 */ 2408 .features[FEAT_XSAVE] = 2409 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2410 CPUID_XSAVE_XGETBV1, 2411 .features[FEAT_6_EAX] = 2412 CPUID_6_EAX_ARAT, 2413 .xlevel = 0x80000008, 2414 .model_id = "Intel Core Processor (Icelake)", 2415 }, 2416 { 2417 .name = "Icelake-Server", 2418 .level = 0xd, 2419 .vendor = CPUID_VENDOR_INTEL, 2420 .family = 6, 2421 .model = 134, 2422 .stepping = 0, 2423 .features[FEAT_1_EDX] = 2424 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2425 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2426 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2427 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2428 CPUID_DE | CPUID_FP87, 2429 .features[FEAT_1_ECX] = 2430 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2431 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2432 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2433 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2434 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2435 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2436 .features[FEAT_8000_0001_EDX] = 2437 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2438 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2439 .features[FEAT_8000_0001_ECX] 
= 2440 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2441 .features[FEAT_8000_0008_EBX] = 2442 CPUID_8000_0008_EBX_WBNOINVD, 2443 .features[FEAT_7_0_EBX] = 2444 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2445 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2446 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2447 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2448 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2449 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2450 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2451 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2452 .features[FEAT_7_0_ECX] = 2453 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 2454 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | 2455 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 2456 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 2457 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 2458 .features[FEAT_7_0_EDX] = 2459 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 2460 /* Missing: XSAVES (not supported by some Linux versions, 2461 * including v4.1 to v4.12). 2462 * KVM doesn't yet expose any XSAVES state save component, 2463 * and the only one defined in Skylake (processor tracing) 2464 * probably will block migration anyway. 
2465 */ 2466 .features[FEAT_XSAVE] = 2467 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2468 CPUID_XSAVE_XGETBV1, 2469 .features[FEAT_6_EAX] = 2470 CPUID_6_EAX_ARAT, 2471 .xlevel = 0x80000008, 2472 .model_id = "Intel Xeon Processor (Icelake)", 2473 }, 2474 { 2475 .name = "SnowRidge-Server", 2476 .level = 27, 2477 .vendor = CPUID_VENDOR_INTEL, 2478 .family = 6, 2479 .model = 134, 2480 .stepping = 1, 2481 .features[FEAT_1_EDX] = 2482 /* missing: CPUID_PN CPUID_IA64 */ 2483 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2484 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 2485 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 2486 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 2487 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 2488 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 2489 CPUID_MMX | 2490 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 2491 .features[FEAT_1_ECX] = 2492 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 2493 CPUID_EXT_VMX | 2494 CPUID_EXT_SSSE3 | 2495 CPUID_EXT_CX16 | 2496 CPUID_EXT_SSE41 | 2497 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 2498 CPUID_EXT_POPCNT | 2499 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 2500 CPUID_EXT_RDRAND, 2501 .features[FEAT_8000_0001_EDX] = 2502 CPUID_EXT2_SYSCALL | 2503 CPUID_EXT2_NX | 2504 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2505 CPUID_EXT2_LM, 2506 .features[FEAT_8000_0001_ECX] = 2507 CPUID_EXT3_LAHF_LM | 2508 CPUID_EXT3_3DNOWPREFETCH, 2509 .features[FEAT_7_0_EBX] = 2510 CPUID_7_0_EBX_FSGSBASE | 2511 CPUID_7_0_EBX_SMEP | 2512 CPUID_7_0_EBX_ERMS | 2513 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 2514 CPUID_7_0_EBX_RDSEED | 2515 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2516 CPUID_7_0_EBX_CLWB | 2517 CPUID_7_0_EBX_SHA_NI, 2518 .features[FEAT_7_0_ECX] = 2519 CPUID_7_0_ECX_UMIP | 2520 /* missing bit 5 */ 2521 CPUID_7_0_ECX_GFNI | 2522 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 2523 CPUID_7_0_ECX_MOVDIR64B, 2524 .features[FEAT_7_0_EDX] = 2525 CPUID_7_0_EDX_SPEC_CTRL | 2526 
CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 2527 CPUID_7_0_EDX_CORE_CAPABILITY, 2528 .features[FEAT_CORE_CAPABILITY] = 2529 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 2530 /* 2531 * Missing: XSAVES (not supported by some Linux versions, 2532 * including v4.1 to v4.12). 2533 * KVM doesn't yet expose any XSAVES state save component, 2534 * and the only one defined in Skylake (processor tracing) 2535 * probably will block migration anyway. 2536 */ 2537 .features[FEAT_XSAVE] = 2538 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2539 CPUID_XSAVE_XGETBV1, 2540 .features[FEAT_6_EAX] = 2541 CPUID_6_EAX_ARAT, 2542 .xlevel = 0x80000008, 2543 .model_id = "Intel Atom Processor (SnowRidge)", 2544 }, 2545 { 2546 .name = "KnightsMill", 2547 .level = 0xd, 2548 .vendor = CPUID_VENDOR_INTEL, 2549 .family = 6, 2550 .model = 133, 2551 .stepping = 0, 2552 .features[FEAT_1_EDX] = 2553 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 2554 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 2555 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 2556 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 2557 CPUID_PSE | CPUID_DE | CPUID_FP87, 2558 .features[FEAT_1_ECX] = 2559 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2560 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2561 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2562 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2563 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2564 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2565 .features[FEAT_8000_0001_EDX] = 2566 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2567 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2568 .features[FEAT_8000_0001_ECX] = 2569 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2570 .features[FEAT_7_0_EBX] = 2571 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2572 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 2573 CPUID_7_0_EBX_RDSEED | 
CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 2574 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 2575 CPUID_7_0_EBX_AVX512ER, 2576 .features[FEAT_7_0_ECX] = 2577 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 2578 .features[FEAT_7_0_EDX] = 2579 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 2580 .features[FEAT_XSAVE] = 2581 CPUID_XSAVE_XSAVEOPT, 2582 .features[FEAT_6_EAX] = 2583 CPUID_6_EAX_ARAT, 2584 .xlevel = 0x80000008, 2585 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 2586 }, 2587 { 2588 .name = "Opteron_G1", 2589 .level = 5, 2590 .vendor = CPUID_VENDOR_AMD, 2591 .family = 15, 2592 .model = 6, 2593 .stepping = 1, 2594 .features[FEAT_1_EDX] = 2595 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2596 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2597 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2598 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2599 CPUID_DE | CPUID_FP87, 2600 .features[FEAT_1_ECX] = 2601 CPUID_EXT_SSE3, 2602 .features[FEAT_8000_0001_EDX] = 2603 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2604 .xlevel = 0x80000008, 2605 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 2606 }, 2607 { 2608 .name = "Opteron_G2", 2609 .level = 5, 2610 .vendor = CPUID_VENDOR_AMD, 2611 .family = 15, 2612 .model = 6, 2613 .stepping = 1, 2614 .features[FEAT_1_EDX] = 2615 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2616 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2617 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2618 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2619 CPUID_DE | CPUID_FP87, 2620 .features[FEAT_1_ECX] = 2621 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 2622 .features[FEAT_8000_0001_EDX] = 2623 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2624 .features[FEAT_8000_0001_ECX] = 2625 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2626 .xlevel = 0x80000008, 2627 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 2628 
}, 2629 { 2630 .name = "Opteron_G3", 2631 .level = 5, 2632 .vendor = CPUID_VENDOR_AMD, 2633 .family = 16, 2634 .model = 2, 2635 .stepping = 3, 2636 .features[FEAT_1_EDX] = 2637 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2638 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2639 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2640 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2641 CPUID_DE | CPUID_FP87, 2642 .features[FEAT_1_ECX] = 2643 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 2644 CPUID_EXT_SSE3, 2645 .features[FEAT_8000_0001_EDX] = 2646 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 2647 CPUID_EXT2_RDTSCP, 2648 .features[FEAT_8000_0001_ECX] = 2649 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 2650 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 2651 .xlevel = 0x80000008, 2652 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 2653 }, 2654 { 2655 .name = "Opteron_G4", 2656 .level = 0xd, 2657 .vendor = CPUID_VENDOR_AMD, 2658 .family = 21, 2659 .model = 1, 2660 .stepping = 2, 2661 .features[FEAT_1_EDX] = 2662 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2663 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2664 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2665 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2666 CPUID_DE | CPUID_FP87, 2667 .features[FEAT_1_ECX] = 2668 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2669 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2670 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2671 CPUID_EXT_SSE3, 2672 .features[FEAT_8000_0001_EDX] = 2673 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2674 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2675 .features[FEAT_8000_0001_ECX] = 2676 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2677 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2678 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2679 CPUID_EXT3_LAHF_LM, 2680 
.features[FEAT_SVM] = 2681 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2682 /* no xsaveopt! */ 2683 .xlevel = 0x8000001A, 2684 .model_id = "AMD Opteron 62xx class CPU", 2685 }, 2686 { 2687 .name = "Opteron_G5", 2688 .level = 0xd, 2689 .vendor = CPUID_VENDOR_AMD, 2690 .family = 21, 2691 .model = 2, 2692 .stepping = 0, 2693 .features[FEAT_1_EDX] = 2694 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2695 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2696 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2697 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2698 CPUID_DE | CPUID_FP87, 2699 .features[FEAT_1_ECX] = 2700 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 2701 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2702 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 2703 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2704 .features[FEAT_8000_0001_EDX] = 2705 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 2706 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 2707 .features[FEAT_8000_0001_ECX] = 2708 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 2709 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 2710 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 2711 CPUID_EXT3_LAHF_LM, 2712 .features[FEAT_SVM] = 2713 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2714 /* no xsaveopt! 
*/ 2715 .xlevel = 0x8000001A, 2716 .model_id = "AMD Opteron 63xx class CPU", 2717 }, 2718 { 2719 .name = "EPYC", 2720 .level = 0xd, 2721 .vendor = CPUID_VENDOR_AMD, 2722 .family = 23, 2723 .model = 1, 2724 .stepping = 2, 2725 .features[FEAT_1_EDX] = 2726 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2727 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2728 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2729 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2730 CPUID_VME | CPUID_FP87, 2731 .features[FEAT_1_ECX] = 2732 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2733 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 2734 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2735 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2736 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2737 .features[FEAT_8000_0001_EDX] = 2738 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2739 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2740 CPUID_EXT2_SYSCALL, 2741 .features[FEAT_8000_0001_ECX] = 2742 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2743 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2744 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2745 CPUID_EXT3_TOPOEXT, 2746 .features[FEAT_7_0_EBX] = 2747 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 2748 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 2749 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 2750 CPUID_7_0_EBX_SHA_NI, 2751 /* Missing: XSAVES (not supported by some Linux versions, 2752 * including v4.1 to v4.12). 2753 * KVM doesn't yet expose any XSAVES state save component. 
2754 */ 2755 .features[FEAT_XSAVE] = 2756 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2757 CPUID_XSAVE_XGETBV1, 2758 .features[FEAT_6_EAX] = 2759 CPUID_6_EAX_ARAT, 2760 .features[FEAT_SVM] = 2761 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 2762 .xlevel = 0x8000001E, 2763 .model_id = "AMD EPYC Processor", 2764 .cache_info = &epyc_cache_info, 2765 .versions = (X86CPUVersionDefinition[]) { 2766 { .version = 1 }, 2767 { 2768 .version = 2, 2769 .alias = "EPYC-IBPB", 2770 .props = (PropValue[]) { 2771 { "ibpb", "on" }, 2772 { "model-id", 2773 "AMD EPYC Processor (with IBPB)" }, 2774 { /* end of list */ } 2775 } 2776 }, 2777 { /* end of list */ } 2778 } 2779 }, 2780 { 2781 .name = "Dhyana", 2782 .level = 0xd, 2783 .vendor = CPUID_VENDOR_HYGON, 2784 .family = 24, 2785 .model = 0, 2786 .stepping = 1, 2787 .features[FEAT_1_EDX] = 2788 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 2789 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 2790 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 2791 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 2792 CPUID_VME | CPUID_FP87, 2793 .features[FEAT_1_ECX] = 2794 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 2795 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 2796 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2797 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 2798 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 2799 .features[FEAT_8000_0001_EDX] = 2800 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 2801 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 2802 CPUID_EXT2_SYSCALL, 2803 .features[FEAT_8000_0001_ECX] = 2804 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 2805 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 2806 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 2807 CPUID_EXT3_TOPOEXT, 2808 .features[FEAT_8000_0008_EBX] = 2809 CPUID_8000_0008_EBX_IBPB, 2810 .features[FEAT_7_0_EBX] = 2811 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 
                                                         CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .features[FEAT_SVM] =
            CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
        .xlevel = 0x8000001E,
        .model_id = "Hygon Dhyana Processor",
        /* Dhyana shares the EPYC cache topology (see &epyc_cache_info) */
        .cache_info = &epyc_cache_info,
    },
};

/*
 * KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries can be overridden at runtime via x86_cpu_change_kvm_default();
 * the table must stay {NULL, NULL}-terminated.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};


/*
 * CPU model version applied when a model's own version is
 * CPU_VERSION_AUTO (see x86_cpu_model_resolve_version()).
 */
X86CPUVersion default_cpu_version = CPU_VERSION_LATEST;

/* Set the version used to resolve CPU_VERSION_AUTO models */
void x86_cpu_set_default_version(X86CPUVersion version)
{
    /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
    assert(version != CPU_VERSION_AUTO);
    default_cpu_version = version;
}

/*
 * Return the highest version number defined for @model, i.e. the
 * .version of the last entry in its versions[] table (0 if the table
 * is empty).
 */
static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
{
    int v = 0;
    const X86CPUVersionDefinition *vdef =
        x86_cpu_def_get_versions(model->cpudef);
    /* Walk to the { .version = 0 } terminator, remembering the last one */
    while (vdef->version) {
        v = vdef->version;
        vdef++;
    }
    return v;
}

/* Return the actual version
   being used for a specific CPU model */
static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
{
    X86CPUVersion v = model->version;
    /* AUTO defers to the machine-type-controlled default */
    if (v == CPU_VERSION_AUTO) {
        v = default_cpu_version;
    }
    /* LATEST resolves to the last entry in the model's versions[] table */
    if (v == CPU_VERSION_LATEST) {
        return x86_cpu_model_last_version(model);
    }
    return v;
}

/*
 * Override the default value of @prop in the kvm_default_props table.
 * @prop must already be present in the table; the value pointer is
 * stored as-is (not copied), so @value must outlive the table's use.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

/*
 * Return true if KVM reports LMCE support (MCG_LMCE_P set in the
 * supported MCE capabilities).  Always false without CONFIG_KVM,
 * since mce_cap stays 0.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}

#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
2933 */ 2934 static int cpu_x86_fill_model_id(char *str) 2935 { 2936 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 2937 int i; 2938 2939 for (i = 0; i < 3; i++) { 2940 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 2941 memcpy(str + i * 16 + 0, &eax, 4); 2942 memcpy(str + i * 16 + 4, &ebx, 4); 2943 memcpy(str + i * 16 + 8, &ecx, 4); 2944 memcpy(str + i * 16 + 12, &edx, 4); 2945 } 2946 return 0; 2947 } 2948 2949 static Property max_x86_cpu_properties[] = { 2950 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 2951 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 2952 DEFINE_PROP_END_OF_LIST() 2953 }; 2954 2955 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 2956 { 2957 DeviceClass *dc = DEVICE_CLASS(oc); 2958 X86CPUClass *xcc = X86_CPU_CLASS(oc); 2959 2960 xcc->ordering = 9; 2961 2962 xcc->model_description = 2963 "Enables all features supported by the accelerator in the current host"; 2964 2965 dc->props = max_x86_cpu_properties; 2966 } 2967 2968 static void max_x86_cpu_initfn(Object *obj) 2969 { 2970 X86CPU *cpu = X86_CPU(obj); 2971 CPUX86State *env = &cpu->env; 2972 KVMState *s = kvm_state; 2973 2974 /* We can't fill the features array here because we don't know yet if 2975 * "migratable" is true or false. 
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        host_vendor_fms(vendor, &family, &model, &stepping);
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        /* Minimum CPUID levels come from the accelerator (KVM or HVF) */
        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled(): the HVF path */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed AMD-flavored identity, family 6 / model 6 / stepping 3 */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for "host": like "max" but requires host CPUID passthrough. */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

/* NOTE(review): the trailing space in both description strings is kept
 * verbatim from the original -- confirm it is intentional. */
#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

/* Return a newly-allocated human-readable location for a feature word,
 * e.g. "CPUID.07H:EBX" or "MSR(10AH)". Caller must g_free() the result.
 * NOTE(review): the @bit parameter is currently unused -- presumably kept
 * for future per-bit descriptions; confirm before relying on it. */
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    return NULL;
}

/* Warn about every bit set in @mask: features the user requested but the
 * current accelerator (host/TCG) cannot provide. */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;
    char *feat_word_str;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            feat_word_str = feature_word_description(f, i);
            warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        feat_word_str,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ?
                        f->feat_names[i] : "", i);
            g_free(feat_word_str);
        }
    }
}

/* QOM getter for "family": base family (bits 11:8), plus the extended
 * family field (bits 27:20) when the base family is 0xf. */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "family": values above 0x0f are encoded as base family
 * 0xf plus the remainder in the extended family field. */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;   /* extended + base field maximum */
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear extended family (27:20) and base family (11:8) fields */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

/* QOM getter for "model": base model (bits 7:4) in the low nibble,
 * extended model (bits 19:16) in the high nibble. */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "model": splits the 8-bit value across the base model
 * and extended model fields of cpuid_version. */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear extended model (19:16) and base model (7:4) fields */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

/* QOM getter for "stepping": bits 3:0 of cpuid_version. */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "stepping": accepts 0..0xf. */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ?
name : "null", value, min, max); 3214 return; 3215 } 3216 3217 env->cpuid_version &= ~0xf; 3218 env->cpuid_version |= value & 0xf; 3219 } 3220 3221 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 3222 { 3223 X86CPU *cpu = X86_CPU(obj); 3224 CPUX86State *env = &cpu->env; 3225 char *value; 3226 3227 value = g_malloc(CPUID_VENDOR_SZ + 1); 3228 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 3229 env->cpuid_vendor3); 3230 return value; 3231 } 3232 3233 static void x86_cpuid_set_vendor(Object *obj, const char *value, 3234 Error **errp) 3235 { 3236 X86CPU *cpu = X86_CPU(obj); 3237 CPUX86State *env = &cpu->env; 3238 int i; 3239 3240 if (strlen(value) != CPUID_VENDOR_SZ) { 3241 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 3242 return; 3243 } 3244 3245 env->cpuid_vendor1 = 0; 3246 env->cpuid_vendor2 = 0; 3247 env->cpuid_vendor3 = 0; 3248 for (i = 0; i < 4; i++) { 3249 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 3250 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 3251 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 3252 } 3253 } 3254 3255 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 3256 { 3257 X86CPU *cpu = X86_CPU(obj); 3258 CPUX86State *env = &cpu->env; 3259 char *value; 3260 int i; 3261 3262 value = g_malloc(48 + 1); 3263 for (i = 0; i < 48; i++) { 3264 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 3265 } 3266 value[48] = '\0'; 3267 return value; 3268 } 3269 3270 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 3271 Error **errp) 3272 { 3273 X86CPU *cpu = X86_CPU(obj); 3274 CPUX86State *env = &cpu->env; 3275 int c, len, i; 3276 3277 if (model_id == NULL) { 3278 model_id = ""; 3279 } 3280 len = strlen(model_id); 3281 memset(env->cpuid_model, 0, 48); 3282 for (i = 0; i < 48; i++) { 3283 if (i >= len) { 3284 c = '\0'; 3285 } else { 3286 c = (uint8_t)model_id[i]; 3287 } 3288 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 3289 } 3290 } 3291 
/* QOM getter for "tsc-frequency": tsc_khz scaled to Hz. */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

/* QOM setter for "tsc-frequency": stores the value (in Hz) as kHz in both
 * tsc_khz and user_tsc_khz. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the feature word array to expose */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e.
 * "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* g_strcmp0 wrapper matching the GCompareFunc signature */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 * Registers each key=value pair as a global property on @typename.
 * Only the first call has any effect (cpu_globals_initialized guard);
 * @features is modified in place by strtok()/feat2prop().
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* Bare "feature" means "feature=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears in both +/- and = syntax */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
                                       strList **feat_names)
{
    FeatureWord w;
    strList **next = feat_names;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }
}

/* QOM getter for "unavailable-features": names of features that were
 * requested but filtered out for this CPU. */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}

/* Check for missing features that may prevent the CPU class from
 * running using the current machine
and accelerator. 3531 */ 3532 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 3533 strList **missing_feats) 3534 { 3535 X86CPU *xc; 3536 Error *err = NULL; 3537 strList **next = missing_feats; 3538 3539 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 3540 strList *new = g_new0(strList, 1); 3541 new->value = g_strdup("kvm"); 3542 *missing_feats = new; 3543 return; 3544 } 3545 3546 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 3547 3548 x86_cpu_expand_features(xc, &err); 3549 if (err) { 3550 /* Errors at x86_cpu_expand_features should never happen, 3551 * but in case it does, just report the model as not 3552 * runnable at all using the "type" property. 3553 */ 3554 strList *new = g_new0(strList, 1); 3555 new->value = g_strdup("type"); 3556 *next = new; 3557 next = &new->next; 3558 } 3559 3560 x86_cpu_filter_features(xc); 3561 3562 x86_cpu_list_feature_names(xc->filtered_features, next); 3563 3564 object_unref(OBJECT(xc)); 3565 } 3566 3567 /* Print all cpuid feature names in featureset 3568 */ 3569 static void listflags(GList *features) 3570 { 3571 size_t len = 0; 3572 GList *tmp; 3573 3574 for (tmp = features; tmp; tmp = tmp->next) { 3575 const char *name = tmp->data; 3576 if ((len + strlen(name) + 1) >= 75) { 3577 qemu_printf("\n"); 3578 len = 0; 3579 } 3580 qemu_printf("%s%s", len == 0 ? " " : " ", name); 3581 len += strlen(name) + 1; 3582 } 3583 qemu_printf("\n"); 3584 } 3585 3586 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
 */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    char *name_a, *name_b;
    int ret;

    if (cc_a->ordering != cc_b->ordering) {
        ret = cc_a->ordering - cc_b->ordering;
    } else {
        name_a = x86_cpu_class_get_model_name(cc_a);
        name_b = x86_cpu_class_get_model_name(cc_b);
        ret = strcmp(name_a, name_b);
        g_free(name_a);
        g_free(name_b);
    }
    return ret;
}

/* Return all X86 CPU classes sorted by ordering, then model name. */
static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}

/* Return the model-id string of @xc by instantiating a throwaway CPU.
 * Caller must g_free() the result. */
static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
{
    Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc)));
    char *r = object_property_get_str(obj, "model-id", &error_abort);
    object_unref(obj);
    return r;
}

/* If @cc is an alias, return the versioned model name it resolves to,
 * otherwise NULL. Caller frees. */
static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
{
    X86CPUVersion version;

    if (!cc->model || !cc->model->is_alias) {
        return NULL;
    }
    version = x86_cpu_model_resolve_version(cc->model);
    if (version <= 0) {
        return NULL;
    }
    return x86_cpu_versioned_model_name(cc->model->cpudef, version);
}

/* g_slist_foreach callback: print one CPU model line for x86_cpu_list(). */
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    char *name = x86_cpu_class_get_model_name(cc);
    char *desc = g_strdup(cc->model_description);
    char *alias_of = x86_cpu_class_get_alias_of(cc);

    /* Fall back: alias note, then the model-id string */
    if (!desc && alias_of) {
        if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
            desc = g_strdup("(alias configured by machine type)");
        } else {
            desc = g_strdup_printf("(alias of %s)", alias_of);
        }
    }
    if (!desc) {
        desc = x86_cpu_class_get_model_id(cc);
    }

    qemu_printf("x86 %-20s  %-48s\n", name, desc);
    g_free(name);
    g_free(desc);
    g_free(alias_of);
}

/* list available CPU models and flags */
void x86_cpu_list(void)
{
    int i, j;
    GSList *list;
    GList *names = NULL;

    qemu_printf("Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, NULL);
    g_slist_free(list);

    names = NULL;
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];
        for (j = 0; j < 32; j++) {
            if (fw->feat_names[j]) {
                names = g_list_append(names, (gpointer)fw->feat_names[j]);
            }
        }
    }

    names = g_list_sort(names, (GCompareFunc)strcmp);

    qemu_printf("\nRecognized CPUID flags:\n");
    listflags(names);
    qemu_printf("\n");
    g_list_free(names);
}

/* g_slist_foreach callback: build one CpuDefinitionInfo entry for
 * qmp_query_cpu_definitions(). @user_data is the list head pointer. */
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;
    /*
     * Old machine types won't report aliases, so that alias translation
     * doesn't break compatibility with previous QEMU versions.
     */
    if (default_cpu_version != CPU_VERSION_LEGACY) {
        info->alias_of = x86_cpu_class_get_alias_of(cc);
        info->has_alias_of = !!info->alias_of;
    }

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

/* QMP query-cpu-definitions: return one entry per registered CPU model. */
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}

/* Return the feature bits of word @w that the current accelerator can
 * provide. With no accelerator at all, everything (~0) is "supported".
 * When @migratable_only is set, non-migratable bits are masked out. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                             wi->cpuid.ecx,
                                             wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                                                   wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF exposes CPUID-backed words only, no MSR feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}

/* Warn about every feature that was requested but filtered out. */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

/* Apply a NULL-terminated PropValue table to @cpu; entries whose value
 * was cleared (NULL) are skipped. */
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}

/* Apply properties for the CPU model version specified in model */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    /* Property deltas are cumulative: apply every version up to @version */
    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}

/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* Userspace APIC can't do x2APIC; drop it from the KVM defaults */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

    x86_cpu_apply_version_props(cpu, model);
}

#ifndef CONFIG_USER_ONLY
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    /* Built once and cached for the lifetime of the process */
    static QDict *d;

    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* Every named feature flag is also a static property */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}

/* Add an entry to @props dict, with the value for property.
 */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}

/* Apply every key/value pair in @props as a QOM property on @obj,
 * stopping at the first error. */
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                    qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}

/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}

/* QMP query-cpu-model-expansion implementation for target/i386. */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                              CpuModelInfo *model,
                              Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
4042 qobject_to(QDict, model->props) : 4043 NULL, &err); 4044 if (err) { 4045 goto out; 4046 } 4047 4048 props = qdict_new(); 4049 ret->model = g_new0(CpuModelInfo, 1); 4050 ret->model->props = QOBJECT(props); 4051 ret->model->has_props = true; 4052 4053 switch (type) { 4054 case CPU_MODEL_EXPANSION_TYPE_STATIC: 4055 /* Static expansion will be based on "base" only */ 4056 base_name = "base"; 4057 x86_cpu_to_dict(xc, props); 4058 break; 4059 case CPU_MODEL_EXPANSION_TYPE_FULL: 4060 /* As we don't return every single property, full expansion needs 4061 * to keep the original model name+props, and add extra 4062 * properties on top of that. 4063 */ 4064 base_name = model->name; 4065 x86_cpu_to_dict_full(xc, props); 4066 break; 4067 default: 4068 error_setg(&err, "Unsupported expansion type"); 4069 goto out; 4070 } 4071 4072 x86_cpu_to_dict(xc, props); 4073 4074 ret->model->name = g_strdup(base_name); 4075 4076 out: 4077 object_unref(OBJECT(xc)); 4078 if (err) { 4079 error_propagate(errp, err); 4080 qapi_free_CpuModelExpansionInfo(ret); 4081 ret = NULL; 4082 } 4083 return ret; 4084 } 4085 #endif /* !CONFIG_USER_ONLY */ 4086 4087 static gchar *x86_gdb_arch_name(CPUState *cs) 4088 { 4089 #ifdef TARGET_X86_64 4090 return g_strdup("i386:x86-64"); 4091 #else 4092 return g_strdup("i386"); 4093 #endif 4094 } 4095 4096 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 4097 { 4098 X86CPUModel *model = data; 4099 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4100 4101 xcc->model = model; 4102 xcc->migration_safe = true; 4103 } 4104 4105 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 4106 { 4107 char *typename = x86_cpu_type_name(name); 4108 TypeInfo ti = { 4109 .name = typename, 4110 .parent = TYPE_X86_CPU, 4111 .class_init = x86_cpu_cpudef_class_init, 4112 .class_data = model, 4113 }; 4114 4115 type_register(&ti); 4116 g_free(typename); 4117 } 4118 4119 static void x86_register_cpudef_types(X86CPUDefinition *def) 4120 { 4121 X86CPUModel 
*m;
    const X86CPUVersionDefinition *vdef;
    char *name;

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    /* Unversioned model: */
    m = g_new0(X86CPUModel, 1);
    m->cpudef = def;
    m->version = CPU_VERSION_AUTO;
    m->is_alias = true;
    x86_register_cpu_model_type(def->name, m);

    /* Versioned models: */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        m->cpudef = def;
        m->version = vdef->version;
        name = x86_cpu_versioned_model_name(def, vdef->version);
        x86_register_cpu_model_type(name, m);
        g_free(name);

        if (vdef->alias) {
            X86CPUModel *am = g_new0(X86CPUModel, 1);
            am->cpudef = def;
            am->version = vdef->version;
            am->is_alias = true;
            x86_register_cpu_model_type(vdef->alias, am);
        }
    }

}

#if !defined(CONFIG_USER_ONLY)

/* Remove the APIC feature bit; used when the board provides no APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

/* Compute the guest-visible response to a CPUID instruction.
 *
 * @index: CPUID leaf (input EAX); @count: sub-leaf (input ECX).
 * @eax/@ebx/@ecx/@edx: output registers, always written.
 *
 * The requested leaf is clamped to the configured maximum of its range
 * (basic/hypervisor/extended/Centaur) before dispatch.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects CR4 state, so compute it dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.
             */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                               cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so compute it dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            /* Leaf 0x1F only exposed for multi-die configurations */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                      cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size/offset of save area */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Model name string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            /* ECX[7:0] = number of threads per package - 1 */
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogous to Intel leaf 4) */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core/node IDs) */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: EAX bit 1 = SEV supported; EBX encodes C-bit position
         * and number of physical address bits removed by memory encryption.
         */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset()
 *
 * Put the CPU into the architectural post-RESET state: real mode at
 * F000:FFF0, default control/segment/FPU/MSR state, BSP designation
 * and accelerator-specific vcpu reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields survive reset (see CPUX86State) */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 so that CS:IP = reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
/* True if this CPU is currently the bootstrap processor (BSP). */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

/* Initialize machine-check (MCE/MCA) capability state, but only for
 * family >= 6 CPUs that advertise both MCE and MCA.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Pick the APIC implementation class matching the configured accelerator. */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

/* Create the local APIC device as a "lapic" child of the CPU object. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

/* Realize the APIC device and (once per machine) map its MMIO window. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

/* machine-done notifier: alias the board's SMRAM region into this CPU's
 * address space if the machine provides one.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

/* Raise *min to at least @value (monotonic level adjustment helper). */
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    /* Top nibble selects the basic/extended/Centaur leaf range */
    uint32_t region = eax & 0xF0000000;

    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}

/***** Steps involved in loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 *
involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_model() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may not be available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
5016 */ 5017 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 5018 { 5019 CPUX86State *env = &cpu->env; 5020 FeatureWord w; 5021 GList *l; 5022 Error *local_err = NULL; 5023 5024 /*TODO: Now cpu->max_features doesn't overwrite features 5025 * set using QOM properties, and we can convert 5026 * plus_features & minus_features to global properties 5027 * inside x86_cpu_parse_featurestr() too. 5028 */ 5029 if (cpu->max_features) { 5030 for (w = 0; w < FEATURE_WORDS; w++) { 5031 /* Override only features that weren't set explicitly 5032 * by the user. 5033 */ 5034 env->features[w] |= 5035 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 5036 ~env->user_features[w] & \ 5037 ~feature_word_info[w].no_autoenable_flags; 5038 } 5039 } 5040 5041 for (l = plus_features; l; l = l->next) { 5042 const char *prop = l->data; 5043 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 5044 if (local_err) { 5045 goto out; 5046 } 5047 } 5048 5049 for (l = minus_features; l; l = l->next) { 5050 const char *prop = l->data; 5051 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 5052 if (local_err) { 5053 goto out; 5054 } 5055 } 5056 5057 if (!kvm_enabled() || !cpu->expose_kvm) { 5058 env->features[FEAT_KVM] = 0; 5059 } 5060 5061 x86_cpu_enable_xsave_components(cpu); 5062 5063 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 5064 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 5065 if (cpu->full_cpuid_auto_level) { 5066 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 5067 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 5068 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 5069 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 5070 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 5071 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 5072 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 5073 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 5074 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 5075 x86_cpu_adjust_feat_level(cpu, 
FEAT_SVM); 5076 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 5077 5078 /* Intel Processor Trace requires CPUID[0x14] */ 5079 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 5080 kvm_enabled() && cpu->intel_pt_auto_level) { 5081 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 5082 } 5083 5084 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 5085 if (env->nr_dies > 1) { 5086 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 5087 } 5088 5089 /* SVM requires CPUID[0x8000000A] */ 5090 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5091 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 5092 } 5093 5094 /* SEV requires CPUID[0x8000001F] */ 5095 if (sev_enabled()) { 5096 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 5097 } 5098 } 5099 5100 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 5101 if (env->cpuid_level == UINT32_MAX) { 5102 env->cpuid_level = env->cpuid_min_level; 5103 } 5104 if (env->cpuid_xlevel == UINT32_MAX) { 5105 env->cpuid_xlevel = env->cpuid_min_xlevel; 5106 } 5107 if (env->cpuid_xlevel2 == UINT32_MAX) { 5108 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 5109 } 5110 5111 out: 5112 if (local_err != NULL) { 5113 error_propagate(errp, local_err); 5114 } 5115 } 5116 5117 /* 5118 * Finishes initialization of CPUID data, filters CPU feature 5119 * words based on host availability of each feature. 5120 * 5121 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        uint32_t available_features = requested_features & host_feat;
        /* x-force-features keeps unsupported bits set (for testing) */
        if (!cpu->force_features) {
            env->features[w] = available_features;
        }
        /* Remember which requested bits were dropped, for reporting */
        cpu->filtered_features[w] = requested_features & ~available_features;
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /*
     * Intel PT is all-or-nothing: the fixed capability bits QEMU reports
     * in CPUID[0x14] must all be emulatable by the host, otherwise the
     * feature has to be filtered out entirely.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}

static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Models like "host" can only run under a host-CPUID accelerator */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Filtering is fatal only with -cpu ...,enforce; "check" just warns */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG emulates a fixed number of physical address bits */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        /* PSE36 extends the physical address space to 36 bits */
        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off requires model-provided cache data */
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        /* AS 0 is normal memory, AS 1 additionally maps SMRAM */
        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

/* Opaque state for a single feature-bit QOM property */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

/* QOM getter: true only if ALL bits in fp->mask are set in the word */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

/* QOM setter: sets/clears fp->mask and records the bits as user-set */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
qdev_prop_set_after_realize(dev, name, errp); 5466 return; 5467 } 5468 5469 visit_type_bool(v, name, &value, &local_err); 5470 if (local_err) { 5471 error_propagate(errp, local_err); 5472 return; 5473 } 5474 5475 if (value) { 5476 cpu->env.features[fp->w] |= fp->mask; 5477 } else { 5478 cpu->env.features[fp->w] &= ~fp->mask; 5479 } 5480 cpu->env.user_features[fp->w] |= fp->mask; 5481 } 5482 5483 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 5484 void *opaque) 5485 { 5486 BitProperty *prop = opaque; 5487 g_free(prop); 5488 } 5489 5490 /* Register a boolean property to get/set a single bit in a uint32_t field. 5491 * 5492 * The same property name can be registered multiple times to make it affect 5493 * multiple bits in the same FeatureWord. In that case, the getter will return 5494 * true only if all bits are set. 5495 */ 5496 static void x86_cpu_register_bit_prop(X86CPU *cpu, 5497 const char *prop_name, 5498 FeatureWord w, 5499 int bitnr) 5500 { 5501 BitProperty *fp; 5502 ObjectProperty *op; 5503 uint32_t mask = (1UL << bitnr); 5504 5505 op = object_property_find(OBJECT(cpu), prop_name, NULL); 5506 if (op) { 5507 fp = op->opaque; 5508 assert(fp->w == w); 5509 fp->mask |= mask; 5510 } else { 5511 fp = g_new0(BitProperty, 1); 5512 fp->w = w; 5513 fp->mask = mask; 5514 object_property_add(OBJECT(cpu), prop_name, "bool", 5515 x86_cpu_get_bit_prop, 5516 x86_cpu_set_bit_prop, 5517 x86_cpu_release_bit_prop, fp, &error_abort); 5518 } 5519 } 5520 5521 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 5522 FeatureWord w, 5523 int bitnr) 5524 { 5525 FeatureWordInfo *fi = &feature_word_info[w]; 5526 const char *name = fi->feat_names[bitnr]; 5527 5528 if (!name) { 5529 return; 5530 } 5531 5532 /* Property names should use "-" instead of "_". 
5533 * Old names containing underscores are registered as aliases 5534 * using object_property_add_alias() 5535 */ 5536 assert(!strchr(name, '_')); 5537 /* aliases don't use "|" delimiters anymore, they are registered 5538 * manually using object_property_add_alias() */ 5539 assert(!strchr(name, '|')); 5540 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 5541 } 5542 5543 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 5544 { 5545 X86CPU *cpu = X86_CPU(cs); 5546 CPUX86State *env = &cpu->env; 5547 GuestPanicInformation *panic_info = NULL; 5548 5549 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 5550 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 5551 5552 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 5553 5554 assert(HV_CRASH_PARAMS >= 5); 5555 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 5556 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 5557 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 5558 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 5559 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 5560 } 5561 5562 return panic_info; 5563 } 5564 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 5565 const char *name, void *opaque, 5566 Error **errp) 5567 { 5568 CPUState *cs = CPU(obj); 5569 GuestPanicInformation *panic_info; 5570 5571 if (!cs->crash_occurred) { 5572 error_setg(errp, "No crash occured"); 5573 return; 5574 } 5575 5576 panic_info = x86_cpu_get_crash_info(cs); 5577 if (panic_info == NULL) { 5578 error_setg(errp, "No crash information"); 5579 return; 5580 } 5581 5582 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 5583 errp); 5584 qapi_free_GuestPanicInformation(panic_info); 5585 } 5586 5587 static void x86_cpu_initfn(Object *obj) 5588 { 5589 X86CPU *cpu = X86_CPU(obj); 5590 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5591 CPUX86State *env = &cpu->env; 5592 FeatureWord w; 5593 5594 env->nr_dies = 1; 5595 
    cpu_set_cpustate_pointers(cpu);

    /* CPUID version fields, exposed as read/write integer properties */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the feature words and of the filtered-out bits */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    /* One boolean property per named feature bit in every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Old underscore spellings, aliased to the canonical dashed names */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}

/* The architecture-dependent CPU ID is the APIC ID */
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

/* Recover EIP from a TB's pc (which includes the CS base) */
static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

/*
 * Return the highest-priority pending interrupt type that can be taken
 * right now, or 0 if none. Priority order matches the checks below.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* All other interrupt types are gated on GIF (SVM global int flag) */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

/* Configure the disassembler for the current CPU mode (16/32/64-bit) */
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

/*
 * Recompute the derived hflags bits from CR0/CR4/EFER/EFLAGS and the
 * segment registers, preserving the bits not covered by HFLAG_COPY_MASK.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is taken from the SS descriptor's DPL */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF/VM/IOPL occupy the same bit positions in eflags and hflags */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment in long mode */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments, all off by default */
    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set"; resolved in x86_cpu_expand_features() */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

/* Class init shared by all X86CPU subclasses */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

/* Abstract base type; concrete models are registered below */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};


/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* Sort after the named models in help/QMP listings */
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};

/* Register the abstract type plus every built-in CPU model */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)