/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"
#include "sev_i386.h"

#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#include "qapi/qapi-commands-machine-target.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif

#include "disas/capstone.h"

/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
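
/*
 * Illustrative note (not built): cpuid2_cache_descriptor() below does an
 * exact-match scan of the table above.  For example, a 32 KiB, 8-way data
 * cache with 64-byte lines matches entry [0x2C] and that byte is what ends
 * up in the CPUID[2] output; any geometry not listed (say, a hypothetical
 * 48 KiB L1D) falls back to CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */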
/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

#define CACHE_LEVEL(l)        (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)


/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
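
/*
 * Worked example (illustrative only): for the legacy AMD L1D defined further
 * below (64 KiB, 2-way, 1 line per tag, 64-byte lines),
 * encode_cache_cpuid80000005() returns
 * (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140, and AMD_ENC_ASSOC(16)
 * yields the 0x8 encoding used by CPUID leaf 0x80000006.
 */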
/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}

/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4

/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
{
    int nodes;

    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    return (nodes == 3) ? 4 : nodes;
}

/*
 * Decide the number of cores in a core complex for the given nr_cores, using
 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 * MAX_NODES_PER_SOCKET defined above.  Maintain symmetry as much as possible.
 * The L3 cache is shared across all cores in a core complex, so this also
 * tells us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
{
    int nodes;

    /* Check if we can fit all the cores in one core complex */
    if (nr_cores <= MAX_CORES_IN_CCX) {
        return nr_cores;
    }
    /* Get the number of nodes required to build this config */
    nodes = nodes_in_socket(nr_cores);

    /*
     * Divide the cores across all the core complexes.
     * Return the rounded up value.
     */
    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
}
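
/*
 * Illustrative example (not built): for nr_cores = 24, nodes_in_socket()
 * computes DIV_ROUND_UP(24, 8) = 3 and rounds the unsupported 3-node case up
 * to 4, and cores_in_core_complex() then returns
 * DIV_ROUND_UP(24, 4 * MAX_CCX) = 3 cores per core complex.
 */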
/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};

/*
 * Build a configuration that closely matches the EPYC hardware.  The EPYC
 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
 * are used right now; this could change in future.
 * nr_cores : Total number of cores in the config
 * core_id  : Core index of the current CPU
 * topo     : Data structure to hold all the config info for this core index
 */
static void build_core_topology(int nr_cores, int core_id,
                                struct core_topology *topo)
{
    int nodes, cores_in_ccx;

    /* First get the number of nodes required */
    nodes = nodes_in_socket(nr_cores);

    cores_in_ccx = cores_in_core_complex(nr_cores);

    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
    topo->core_id = core_id % cores_in_ccx;
    topo->num_nodes = nodes;
}
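
/*
 * Illustrative example (not built): with nr_cores = 16 and core_id = 10,
 * build_core_topology() gets cores_in_ccx = 4 and fills in
 * node_id = 10 / 8 = 1, ccx_id = (10 % 8) / 4 = 0, core_id = 10 % 4 = 2,
 * num_nodes = 2.
 */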
/* Encode cache info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes, but with
         * more than 32 cores we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. The only
         * requirement here is that this number should be unique across the
         * system.  Shift the socket id to accommodate more nodes. We don't
         * expect both socket id and node id to be big numbers at the same
         * time; this is not an ideal config but we need to support it.
         * The maximum number of nodes we can have is 32 (255/8), with 8 cores
         * per node and 255 max cores, so we only need 5 bits for nodes.
         * Find the leftmost set bit to represent the total number of nodes;
         * find_last_bit returns the last set bit (0-based).  Left-shift (+1)
         * the socket id to make room for all the node bits.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}

/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */

static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
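
/*
 * Illustrative note (not built): x86_cpu_vendor_words2str() undoes the usual
 * CPUID[0].EBX/EDX/ECX packing; e.g. the Intel register values 0x756e6547,
 * 0x49656e69 and 0x6c65746e reassemble into the 12-character string
 * "GenuineIntel".
 */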
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */    \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[64];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
        } msr;
    };
    uint64_t tcg_features; /* Feature flags supported by TCG */
    uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint64_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint64_t no_autoenable_flags;
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
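    /*
     * Illustrative note: each feat_names[] index is the bit position inside
     * its feature word, so "syscall" above is bit 11 of CPUID[0x80000001].EDX
     * and can be toggled from the command line as a CPU property (e.g.
     * "-cpu qemu64,-syscall"); the commented-out NULL slots are the
     * CPUID[1].EDX aliases that are copied in only for AMD vendor CPUs.
     */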
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    /*
     * .feat_names are commented out for Hyper-V enlightenments because we
     * don't want to have two different ways for enabling them on QEMU command
     * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
     * enabling several feature bits simultaneously, exposing these bits
     * individually may just confuse guests.
     */
    [FEAT_HYPERV_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
    },
    [FEAT_HYPERV_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
    },
    [FEAT_HYPERV_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
    },
    [FEAT_HV_RECOMM_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_recommend_pv_as_switch */,
            NULL /* hv_recommend_pv_tlbflush_local */,
            NULL /* hv_recommend_pv_tlbflush_remote */,
            NULL /* hv_recommend_msr_apic_access */,
            NULL /* hv_recommend_msr_reset */,
            NULL /* hv_recommend_relaxed_timing */,
            NULL /* hv_recommend_dma_remapping */,
            NULL /* hv_recommend_int_remapping */,
            NULL /* hv_recommend_x2apic_msrs */,
            NULL /* hv_recommend_autoeoi_deprecation */,
            NULL /* hv_recommend_pv_ipi */,
            NULL /* hv_recommend_ex_hypercalls */,
            NULL /* hv_hypervisor_is_nested */,
            NULL /* hv_recommend_int_mbec */,
            NULL /* hv_recommend_evmcs */,
            NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
    },
    [FEAT_HV_NESTED_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
    },
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
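    /*
     * Illustrative sketch (not built): a CPUID_FEATURE_WORD entry fully
     * describes where its bits live.  Reading the FEAT_7_0_EBX word declared
     * just below from the host would look roughly like:
     *
     *     uint32_t eax, ebx, ecx, edx;
     *     host_cpuid(7, 0, &eax, &ebx, &ecx, &edx);   // .eax = 7, .ecx = 0
     *     bool smep = ebx & CPUID_7_0_EBX_SMEP;       // .reg = R_EBX
     *
     * host_cpuid() itself is defined further down in this file.
     */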
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, "movdiri",
            "movdir64b", NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, "md-clear", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL /* pconfig */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", "stibp",
            NULL, "arch-capabilities", "core-capability", "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_7_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "avx512-bf16", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_7_1_EAX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "clzero", NULL, "xsaveerptr", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, NULL, "amd-stibp",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /*Below are MSR exposed features*/
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
            "taa-no", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
        },
    },

    [FEAT_VMX_PROCBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
            NULL, NULL, NULL, "vmx-hlt-exit",
            NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
            "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
            "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
            "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
            "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
            "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
        }
    },

    [FEAT_VMX_SECONDARY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
            "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
            "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
            "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
            "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
            "vmx-xsaves", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_PROCBASED_CTLS2,
        }
    },

    [FEAT_VMX_PINBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
            NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        }
    },

    [FEAT_VMX_EXIT_CTLS] = {
        .type = MSR_FEATURE_WORD,
        /*
         * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
         * the LM CPUID bit.
         */
        .feat_names = {
            NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
            "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
            NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
            "vmx-exit-save-efer", "vmx-exit-load-efer",
            "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
            NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
        }
    },

    [FEAT_VMX_ENTRY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-entry-noload-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-entry-ia32e-mode", NULL, NULL,
            NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
            "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
        }
    },

    [FEAT_VMX_MISC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
            "vmx-activity-wait-sipi", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_MISC,
        }
    },

    [FEAT_VMX_EPT_VPID_CAPS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-ept-execonly", NULL, NULL, NULL,
            NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
            "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
            NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid-single-addr", "vmx-invept-single-context",
            "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_EPT_VPID_CAP,
        }
    },

    [FEAT_VMX_BASIC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [54] = "vmx-ins-outs",
            [55] = "vmx-true-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_BASIC,
        },
        /* Just to be safe - we don't support setting the MSEG version field. */
        .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
    },

    [FEAT_VMX_VMFUNC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [0] = "vmx-eptp-switching",
        },
        .msr = {
            .index = MSR_IA32_VMX_VMFUNC,
        }
    },

};

typedef struct FeatureMask {
    FeatureWord index;
    uint64_t mask;
} FeatureMask;

typedef struct FeatureDep {
    FeatureMask from, to;
} FeatureDep;

static FeatureDep feature_dependencies[] = {
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
        .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
        .to = { FEAT_CORE_CAPABILITY, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_MISC, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_BASIC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
        .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
    },
    {
        .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
        .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
        .to = { FEAT_VMX_VMFUNC, ~0ull },
    },
};
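
/*
 * Illustrative note (not built): each FeatureDep above ties dependent bits to
 * their prerequisites.  For example, the
 * { FEAT_1_ECX, CPUID_EXT_VMX } -> { FEAT_VMX_PROCBASED_CTLS, ~0ull } entry
 * means that, roughly speaking, all VMX proc-based control bits are dropped
 * from the guest configuration whenever the "vmx" bit of CPUID[1].ECX is not
 * enabled.
 */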

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
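
/*
 * Illustrative example (not built): xsave_area_size(XSTATE_FP_MASK |
 * XSTATE_SSE_MASK) is simply sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
 * i.e. the legacy region plus the XSAVE header, while adding XSTATE_YMM_MASK
 * grows the result to offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX).
 */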

static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i++) {
        uint64_t f = 1ULL << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = x86_cpu_type_name(cpu_model);
    return object_class_by_name(typename);
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

typedef struct PropValue {
    const char *prop, *value;
} PropValue;

typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    const char *note;
    PropValue *props;
} X86CPUVersionDefinition;
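
/*
 * Illustrative sketch (not built): a CPU model may carry a version list,
 * terminated by a zeroed entry.  The names below are hypothetical:
 *
 *     static PropValue example_v2_props[] = {
 *         { "model-id", "Some CPU model v2" },
 *         { NULL, NULL },
 *     };
 *     static const X86CPUVersionDefinition example_versions[] = {
 *         { .version = 1 },
 *         { .version = 2, .note = "example", .props = example_v2_props },
 *         { 0 },
 *     };
 */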
1714 */ 1715 const X86CPUVersionDefinition *versions; 1716 } X86CPUDefinition; 1717 1718 /* Reference to a specific CPU model version */ 1719 struct X86CPUModel { 1720 /* Base CPU definition */ 1721 X86CPUDefinition *cpudef; 1722 /* CPU model version */ 1723 X86CPUVersion version; 1724 const char *note; 1725 /* 1726 * If true, this is an alias CPU model. 1727 * This matters only for "-cpu help" and query-cpu-definitions 1728 */ 1729 bool is_alias; 1730 }; 1731 1732 /* Get full model name for CPU version */ 1733 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1734 X86CPUVersion version) 1735 { 1736 assert(version > 0); 1737 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1738 } 1739 1740 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1741 { 1742 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1743 static const X86CPUVersionDefinition default_version_list[] = { 1744 { 1 }, 1745 { /* end of list */ } 1746 }; 1747 1748 return def->versions ?: default_version_list; 1749 } 1750 1751 static CPUCaches epyc_cache_info = { 1752 .l1d_cache = &(CPUCacheInfo) { 1753 .type = DATA_CACHE, 1754 .level = 1, 1755 .size = 32 * KiB, 1756 .line_size = 64, 1757 .associativity = 8, 1758 .partitions = 1, 1759 .sets = 64, 1760 .lines_per_tag = 1, 1761 .self_init = 1, 1762 .no_invd_sharing = true, 1763 }, 1764 .l1i_cache = &(CPUCacheInfo) { 1765 .type = INSTRUCTION_CACHE, 1766 .level = 1, 1767 .size = 64 * KiB, 1768 .line_size = 64, 1769 .associativity = 4, 1770 .partitions = 1, 1771 .sets = 256, 1772 .lines_per_tag = 1, 1773 .self_init = 1, 1774 .no_invd_sharing = true, 1775 }, 1776 .l2_cache = &(CPUCacheInfo) { 1777 .type = UNIFIED_CACHE, 1778 .level = 2, 1779 .size = 512 * KiB, 1780 .line_size = 64, 1781 .associativity = 8, 1782 .partitions = 1, 1783 .sets = 1024, 1784 .lines_per_tag = 1, 1785 }, 1786 .l3_cache = &(CPUCacheInfo) { 1787 .type = UNIFIED_CACHE, 1788 .level = 3, 1789 .size = 8 * MiB, 1790 .line_size = 64, 1791 .associativity = 16, 1792 .partitions = 1, 1793 .sets = 8192, 1794 .lines_per_tag = 1, 1795 .self_init = true, 1796 .inclusive = true, 1797 .complex_indexing = true, 1798 }, 1799 }; 1800 1801 static CPUCaches epyc_rome_cache_info = { 1802 .l1d_cache = &(CPUCacheInfo) { 1803 .type = DATA_CACHE, 1804 .level = 1, 1805 .size = 32 * KiB, 1806 .line_size = 64, 1807 .associativity = 8, 1808 .partitions = 1, 1809 .sets = 64, 1810 .lines_per_tag = 1, 1811 .self_init = 1, 1812 .no_invd_sharing = true, 1813 }, 1814 .l1i_cache = &(CPUCacheInfo) { 1815 .type = INSTRUCTION_CACHE, 1816 .level = 1, 1817 .size = 32 * KiB, 1818 .line_size = 64, 1819 .associativity = 8, 1820 .partitions = 1, 1821 .sets = 64, 1822 .lines_per_tag = 1, 1823 .self_init = 1, 1824 .no_invd_sharing = true, 1825 }, 1826 .l2_cache = &(CPUCacheInfo) { 1827 .type = UNIFIED_CACHE, 1828 .level = 2, 1829 .size = 512 * KiB, 1830 .line_size = 64, 1831 .associativity = 8, 1832 .partitions = 1, 1833 .sets = 1024, 1834 .lines_per_tag = 1, 1835 }, 1836 .l3_cache = &(CPUCacheInfo) { 1837 .type = UNIFIED_CACHE, 1838 .level = 3, 1839 .size = 16 * MiB, 1840 .line_size = 64, 1841 .associativity = 16, 1842 .partitions = 1, 1843 .sets = 16384, 1844 .lines_per_tag = 1, 1845 .self_init = true, 1846 .inclusive = true, 1847 .complex_indexing = true, 1848 }, 1849 }; 1850 1851 /* The following VMX features are not supported by KVM and are left out in the 1852 * CPU definitions: 1853 * 1854 * Dual-monitor support (all processors) 1855 * Entry to SMM 1856 * Deactivate 
dual-monitor treatment 1857 * Number of CR3-target values 1858 * Shutdown activity state 1859 * Wait-for-SIPI activity state 1860 * PAUSE-loop exiting (Westmere and newer) 1861 * EPT-violation #VE (Broadwell and newer) 1862 * Inject event with insn length=0 (Skylake and newer) 1863 * Conceal non-root operation from PT 1864 * Conceal VM exits from PT 1865 * Conceal VM entries from PT 1866 * Enable ENCLS exiting 1867 * Mode-based execute control (XS/XU) 1868 * TSC scaling (Skylake Server and newer) 1869 * GPA translation for PT (IceLake and newer) 1870 * User wait and pause 1871 * ENCLV exiting 1872 * Load IA32_RTIT_CTL 1873 * Clear IA32_RTIT_CTL 1874 * Advanced VM-exit information for EPT violations 1875 * Sub-page write permissions 1876 * PT in VMX operation 1877 */ 1878 1879 static X86CPUDefinition builtin_x86_defs[] = { 1880 { 1881 .name = "qemu64", 1882 .level = 0xd, 1883 .vendor = CPUID_VENDOR_AMD, 1884 .family = 6, 1885 .model = 6, 1886 .stepping = 3, 1887 .features[FEAT_1_EDX] = 1888 PPRO_FEATURES | 1889 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1890 CPUID_PSE36, 1891 .features[FEAT_1_ECX] = 1892 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1893 .features[FEAT_8000_0001_EDX] = 1894 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1895 .features[FEAT_8000_0001_ECX] = 1896 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1897 .xlevel = 0x8000000A, 1898 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1899 }, 1900 { 1901 .name = "phenom", 1902 .level = 5, 1903 .vendor = CPUID_VENDOR_AMD, 1904 .family = 16, 1905 .model = 2, 1906 .stepping = 3, 1907 /* Missing: CPUID_HT */ 1908 .features[FEAT_1_EDX] = 1909 PPRO_FEATURES | 1910 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1911 CPUID_PSE36 | CPUID_VME, 1912 .features[FEAT_1_ECX] = 1913 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1914 CPUID_EXT_POPCNT, 1915 .features[FEAT_8000_0001_EDX] = 1916 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1917 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1918 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1919 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1920 CPUID_EXT3_CR8LEG, 1921 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1922 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1923 .features[FEAT_8000_0001_ECX] = 1924 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1925 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1926 /* Missing: CPUID_SVM_LBRV */ 1927 .features[FEAT_SVM] = 1928 CPUID_SVM_NPT, 1929 .xlevel = 0x8000001A, 1930 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1931 }, 1932 { 1933 .name = "core2duo", 1934 .level = 10, 1935 .vendor = CPUID_VENDOR_INTEL, 1936 .family = 6, 1937 .model = 15, 1938 .stepping = 11, 1939 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1940 .features[FEAT_1_EDX] = 1941 PPRO_FEATURES | 1942 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1943 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1944 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1945 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1946 .features[FEAT_1_ECX] = 1947 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1948 CPUID_EXT_CX16, 1949 .features[FEAT_8000_0001_EDX] = 1950 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1951 .features[FEAT_8000_0001_ECX] = 1952 CPUID_EXT3_LAHF_LM, 1953 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1954 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1955 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1956 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1957
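            /*
             * The remaining FEAT_VMX_* words list the pin-based, processor-based
             * and secondary VM-execution controls advertised by this model; VMX
             * features named in the comment above builtin_x86_defs are left out.
             */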
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1958 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1959 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1960 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1961 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1962 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1963 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1964 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1965 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1966 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1967 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1968 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1969 .features[FEAT_VMX_SECONDARY_CTLS] = 1970 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1971 .xlevel = 0x80000008, 1972 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1973 }, 1974 { 1975 .name = "kvm64", 1976 .level = 0xd, 1977 .vendor = CPUID_VENDOR_INTEL, 1978 .family = 15, 1979 .model = 6, 1980 .stepping = 1, 1981 /* Missing: CPUID_HT */ 1982 .features[FEAT_1_EDX] = 1983 PPRO_FEATURES | CPUID_VME | 1984 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1985 CPUID_PSE36, 1986 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1987 .features[FEAT_1_ECX] = 1988 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1989 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1990 .features[FEAT_8000_0001_EDX] = 1991 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1992 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1993 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1994 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1995 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1996 .features[FEAT_8000_0001_ECX] = 1997 0, 1998 /* VMX features from Cedar Mill/Prescott */ 1999 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2000 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2001 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2002 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2003 VMX_PIN_BASED_NMI_EXITING, 2004 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2005 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2006 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2007 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2008 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2009 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2010 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2011 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 2012 .xlevel = 0x80000008, 2013 .model_id = "Common KVM processor" 2014 }, 2015 { 2016 .name = "qemu32", 2017 .level = 4, 2018 .vendor = CPUID_VENDOR_INTEL, 2019 .family = 6, 2020 .model = 6, 2021 .stepping = 3, 2022 .features[FEAT_1_EDX] = 2023 PPRO_FEATURES, 2024 .features[FEAT_1_ECX] = 2025 CPUID_EXT_SSE3, 2026 .xlevel = 0x80000004, 2027 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2028 }, 2029 { 2030 .name = "kvm32", 2031 .level = 5, 2032 .vendor = CPUID_VENDOR_INTEL, 2033 .family = 15, 2034 .model = 6, 2035 .stepping = 1, 2036 .features[FEAT_1_EDX] = 2037 PPRO_FEATURES | CPUID_VME | 2038 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 2039 .features[FEAT_1_ECX] = 2040 CPUID_EXT_SSE3, 2041 .features[FEAT_8000_0001_ECX] = 2042 0, 2043 /* VMX features from Yonah */ 2044 
.features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2045 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2046 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2047 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2048 VMX_PIN_BASED_NMI_EXITING, 2049 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2050 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2051 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2052 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2053 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2054 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2055 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2056 .xlevel = 0x80000008, 2057 .model_id = "Common 32-bit KVM processor" 2058 }, 2059 { 2060 .name = "coreduo", 2061 .level = 10, 2062 .vendor = CPUID_VENDOR_INTEL, 2063 .family = 6, 2064 .model = 14, 2065 .stepping = 8, 2066 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2067 .features[FEAT_1_EDX] = 2068 PPRO_FEATURES | CPUID_VME | 2069 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2070 CPUID_SS, 2071 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2072 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2073 .features[FEAT_1_ECX] = 2074 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2075 .features[FEAT_8000_0001_EDX] = 2076 CPUID_EXT2_NX, 2077 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2078 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2079 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2080 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2081 VMX_PIN_BASED_NMI_EXITING, 2082 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2083 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2084 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2085 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2086 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2087 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2088 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2089 .xlevel = 0x80000008, 2090 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2091 }, 2092 { 2093 .name = "486", 2094 .level = 1, 2095 .vendor = CPUID_VENDOR_INTEL, 2096 .family = 4, 2097 .model = 8, 2098 .stepping = 0, 2099 .features[FEAT_1_EDX] = 2100 I486_FEATURES, 2101 .xlevel = 0, 2102 .model_id = "", 2103 }, 2104 { 2105 .name = "pentium", 2106 .level = 1, 2107 .vendor = CPUID_VENDOR_INTEL, 2108 .family = 5, 2109 .model = 4, 2110 .stepping = 3, 2111 .features[FEAT_1_EDX] = 2112 PENTIUM_FEATURES, 2113 .xlevel = 0, 2114 .model_id = "", 2115 }, 2116 { 2117 .name = "pentium2", 2118 .level = 2, 2119 .vendor = CPUID_VENDOR_INTEL, 2120 .family = 6, 2121 .model = 5, 2122 .stepping = 2, 2123 .features[FEAT_1_EDX] = 2124 PENTIUM2_FEATURES, 2125 .xlevel = 0, 2126 .model_id = "", 2127 }, 2128 { 2129 .name = "pentium3", 2130 .level = 3, 2131 .vendor = CPUID_VENDOR_INTEL, 2132 .family = 6, 2133 .model = 7, 2134 .stepping = 3, 2135 .features[FEAT_1_EDX] = 2136 PENTIUM3_FEATURES, 2137 .xlevel = 0, 2138 .model_id = "", 2139 }, 2140 { 2141 .name = "athlon", 2142 .level = 2, 2143 .vendor = CPUID_VENDOR_AMD, 2144 .family = 6, 2145 .model = 2, 2146 .stepping = 3, 2147 .features[FEAT_1_EDX] = 2148 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2149 CPUID_MCA, 2150 .features[FEAT_8000_0001_EDX] = 2151 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | 
CPUID_EXT2_3DNOWEXT, 2152 .xlevel = 0x80000008, 2153 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2154 }, 2155 { 2156 .name = "n270", 2157 .level = 10, 2158 .vendor = CPUID_VENDOR_INTEL, 2159 .family = 6, 2160 .model = 28, 2161 .stepping = 2, 2162 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2163 .features[FEAT_1_EDX] = 2164 PPRO_FEATURES | 2165 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2166 CPUID_ACPI | CPUID_SS, 2167 /* Some CPUs got no CPUID_SEP */ 2168 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2169 * CPUID_EXT_XTPR */ 2170 .features[FEAT_1_ECX] = 2171 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2172 CPUID_EXT_MOVBE, 2173 .features[FEAT_8000_0001_EDX] = 2174 CPUID_EXT2_NX, 2175 .features[FEAT_8000_0001_ECX] = 2176 CPUID_EXT3_LAHF_LM, 2177 .xlevel = 0x80000008, 2178 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2179 }, 2180 { 2181 .name = "Conroe", 2182 .level = 10, 2183 .vendor = CPUID_VENDOR_INTEL, 2184 .family = 6, 2185 .model = 15, 2186 .stepping = 3, 2187 .features[FEAT_1_EDX] = 2188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2192 CPUID_DE | CPUID_FP87, 2193 .features[FEAT_1_ECX] = 2194 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2195 .features[FEAT_8000_0001_EDX] = 2196 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2197 .features[FEAT_8000_0001_ECX] = 2198 CPUID_EXT3_LAHF_LM, 2199 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2200 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2201 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2202 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2203 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2204 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2205 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2206 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2207 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2208 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2209 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2210 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2211 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2212 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2213 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2214 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2215 .features[FEAT_VMX_SECONDARY_CTLS] = 2216 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2217 .xlevel = 0x80000008, 2218 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2219 }, 2220 { 2221 .name = "Penryn", 2222 .level = 10, 2223 .vendor = CPUID_VENDOR_INTEL, 2224 .family = 6, 2225 .model = 23, 2226 .stepping = 3, 2227 .features[FEAT_1_EDX] = 2228 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2229 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2230 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2231 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2232 CPUID_DE | CPUID_FP87, 2233 .features[FEAT_1_ECX] = 2234 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2235 CPUID_EXT_SSE3, 2236 .features[FEAT_8000_0001_EDX] = 2237 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2238 .features[FEAT_8000_0001_ECX] = 2239 
CPUID_EXT3_LAHF_LM, 2240 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2241 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2242 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2243 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2244 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2245 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2246 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2247 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2248 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2249 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2250 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2251 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2252 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2253 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2254 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2255 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2256 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2257 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2258 .features[FEAT_VMX_SECONDARY_CTLS] = 2259 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2260 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2261 .xlevel = 0x80000008, 2262 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2263 }, 2264 { 2265 .name = "Nehalem", 2266 .level = 11, 2267 .vendor = CPUID_VENDOR_INTEL, 2268 .family = 6, 2269 .model = 26, 2270 .stepping = 3, 2271 .features[FEAT_1_EDX] = 2272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2276 CPUID_DE | CPUID_FP87, 2277 .features[FEAT_1_ECX] = 2278 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2279 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2280 .features[FEAT_8000_0001_EDX] = 2281 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2282 .features[FEAT_8000_0001_ECX] = 2283 CPUID_EXT3_LAHF_LM, 2284 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2285 MSR_VMX_BASIC_TRUE_CTLS, 2286 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2287 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2288 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2289 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2290 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2291 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2292 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2293 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2294 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2295 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2296 .features[FEAT_VMX_EXIT_CTLS] = 2297 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2298 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2299 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2300 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2301 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2302 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2303 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2304 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2305 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2306 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2307 
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2308 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2309 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2310 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2311 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2312 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2313 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2314 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2315 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2316 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2317 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2318 .features[FEAT_VMX_SECONDARY_CTLS] = 2319 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2320 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2321 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2322 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2323 VMX_SECONDARY_EXEC_ENABLE_VPID, 2324 .xlevel = 0x80000008, 2325 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2326 .versions = (X86CPUVersionDefinition[]) { 2327 { .version = 1 }, 2328 { 2329 .version = 2, 2330 .alias = "Nehalem-IBRS", 2331 .props = (PropValue[]) { 2332 { "spec-ctrl", "on" }, 2333 { "model-id", 2334 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2335 { /* end of list */ } 2336 } 2337 }, 2338 { /* end of list */ } 2339 } 2340 }, 2341 { 2342 .name = "Westmere", 2343 .level = 11, 2344 .vendor = CPUID_VENDOR_INTEL, 2345 .family = 6, 2346 .model = 44, 2347 .stepping = 1, 2348 .features[FEAT_1_EDX] = 2349 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2350 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2351 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2352 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2353 CPUID_DE | CPUID_FP87, 2354 .features[FEAT_1_ECX] = 2355 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2356 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2357 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2358 .features[FEAT_8000_0001_EDX] = 2359 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2360 .features[FEAT_8000_0001_ECX] = 2361 CPUID_EXT3_LAHF_LM, 2362 .features[FEAT_6_EAX] = 2363 CPUID_6_EAX_ARAT, 2364 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2365 MSR_VMX_BASIC_TRUE_CTLS, 2366 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2367 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2368 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2369 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2370 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2371 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2372 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2373 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2374 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2375 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2376 .features[FEAT_VMX_EXIT_CTLS] = 2377 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2378 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2379 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2380 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2381 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2382 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2383 MSR_VMX_MISC_STORE_LMA, 2384 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2385 
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2386 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2387 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2388 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2389 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2390 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2391 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2392 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2393 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2394 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2395 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2396 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2397 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2398 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2399 .features[FEAT_VMX_SECONDARY_CTLS] = 2400 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2401 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2402 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2403 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2404 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2405 .xlevel = 0x80000008, 2406 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2407 .versions = (X86CPUVersionDefinition[]) { 2408 { .version = 1 }, 2409 { 2410 .version = 2, 2411 .alias = "Westmere-IBRS", 2412 .props = (PropValue[]) { 2413 { "spec-ctrl", "on" }, 2414 { "model-id", 2415 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2416 { /* end of list */ } 2417 } 2418 }, 2419 { /* end of list */ } 2420 } 2421 }, 2422 { 2423 .name = "SandyBridge", 2424 .level = 0xd, 2425 .vendor = CPUID_VENDOR_INTEL, 2426 .family = 6, 2427 .model = 42, 2428 .stepping = 1, 2429 .features[FEAT_1_EDX] = 2430 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2431 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2432 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2433 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2434 CPUID_DE | CPUID_FP87, 2435 .features[FEAT_1_ECX] = 2436 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2437 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2438 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2439 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2440 CPUID_EXT_SSE3, 2441 .features[FEAT_8000_0001_EDX] = 2442 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2443 CPUID_EXT2_SYSCALL, 2444 .features[FEAT_8000_0001_ECX] = 2445 CPUID_EXT3_LAHF_LM, 2446 .features[FEAT_XSAVE] = 2447 CPUID_XSAVE_XSAVEOPT, 2448 .features[FEAT_6_EAX] = 2449 CPUID_6_EAX_ARAT, 2450 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2451 MSR_VMX_BASIC_TRUE_CTLS, 2452 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2453 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2454 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2455 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2456 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2457 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2458 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2459 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2460 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2461 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2462 .features[FEAT_VMX_EXIT_CTLS] = 2463 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2464 
VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2465 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2466 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2467 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2468 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2469 MSR_VMX_MISC_STORE_LMA, 2470 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2471 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2472 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2473 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2474 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2475 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2476 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2477 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2478 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2479 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2480 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2481 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2482 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2483 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2484 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2485 .features[FEAT_VMX_SECONDARY_CTLS] = 2486 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2487 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2488 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2489 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2490 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2491 .xlevel = 0x80000008, 2492 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2493 .versions = (X86CPUVersionDefinition[]) { 2494 { .version = 1 }, 2495 { 2496 .version = 2, 2497 .alias = "SandyBridge-IBRS", 2498 .props = (PropValue[]) { 2499 { "spec-ctrl", "on" }, 2500 { "model-id", 2501 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2502 { /* end of list */ } 2503 } 2504 }, 2505 { /* end of list */ } 2506 } 2507 }, 2508 { 2509 .name = "IvyBridge", 2510 .level = 0xd, 2511 .vendor = CPUID_VENDOR_INTEL, 2512 .family = 6, 2513 .model = 58, 2514 .stepping = 9, 2515 .features[FEAT_1_EDX] = 2516 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2517 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2518 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2519 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2520 CPUID_DE | CPUID_FP87, 2521 .features[FEAT_1_ECX] = 2522 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2523 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2524 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2525 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2526 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2527 .features[FEAT_7_0_EBX] = 2528 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2529 CPUID_7_0_EBX_ERMS, 2530 .features[FEAT_8000_0001_EDX] = 2531 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2532 CPUID_EXT2_SYSCALL, 2533 .features[FEAT_8000_0001_ECX] = 2534 CPUID_EXT3_LAHF_LM, 2535 .features[FEAT_XSAVE] = 2536 CPUID_XSAVE_XSAVEOPT, 2537 .features[FEAT_6_EAX] = 2538 CPUID_6_EAX_ARAT, 2539 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2540 MSR_VMX_BASIC_TRUE_CTLS, 2541 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2542 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2543 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2544 .features[FEAT_VMX_EPT_VPID_CAPS] = 
MSR_VMX_EPT_EXECONLY | 2545 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2546 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2547 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2548 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2549 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2550 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2551 .features[FEAT_VMX_EXIT_CTLS] = 2552 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2553 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2554 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2555 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2556 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2557 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2558 MSR_VMX_MISC_STORE_LMA, 2559 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2560 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2561 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2562 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2563 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2564 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2565 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2566 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2567 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2568 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2569 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2570 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2571 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2572 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2573 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2574 .features[FEAT_VMX_SECONDARY_CTLS] = 2575 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2576 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2577 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2578 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2579 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2580 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2581 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2582 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2583 .xlevel = 0x80000008, 2584 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2585 .versions = (X86CPUVersionDefinition[]) { 2586 { .version = 1 }, 2587 { 2588 .version = 2, 2589 .alias = "IvyBridge-IBRS", 2590 .props = (PropValue[]) { 2591 { "spec-ctrl", "on" }, 2592 { "model-id", 2593 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2594 { /* end of list */ } 2595 } 2596 }, 2597 { /* end of list */ } 2598 } 2599 }, 2600 { 2601 .name = "Haswell", 2602 .level = 0xd, 2603 .vendor = CPUID_VENDOR_INTEL, 2604 .family = 6, 2605 .model = 60, 2606 .stepping = 4, 2607 .features[FEAT_1_EDX] = 2608 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2609 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2610 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2611 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2612 CPUID_DE | CPUID_FP87, 2613 .features[FEAT_1_ECX] = 2614 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2615 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2616 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2617 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2618 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2619 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2620 
.features[FEAT_8000_0001_EDX] = 2621 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2622 CPUID_EXT2_SYSCALL, 2623 .features[FEAT_8000_0001_ECX] = 2624 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2625 .features[FEAT_7_0_EBX] = 2626 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2627 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2628 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2629 CPUID_7_0_EBX_RTM, 2630 .features[FEAT_XSAVE] = 2631 CPUID_XSAVE_XSAVEOPT, 2632 .features[FEAT_6_EAX] = 2633 CPUID_6_EAX_ARAT, 2634 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2635 MSR_VMX_BASIC_TRUE_CTLS, 2636 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2637 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2638 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2639 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2640 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2641 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2642 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2643 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2644 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2645 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2646 .features[FEAT_VMX_EXIT_CTLS] = 2647 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2648 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2649 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2650 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2651 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2652 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2653 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2654 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2655 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2656 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2657 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2658 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2659 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2660 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2661 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2662 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2663 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2664 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2665 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2666 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2667 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2668 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2669 .features[FEAT_VMX_SECONDARY_CTLS] = 2670 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2671 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2672 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2673 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2674 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2675 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2676 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2677 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2678 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2679 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2680 .xlevel = 0x80000008, 2681 .model_id = "Intel Core Processor (Haswell)", 2682 .versions = (X86CPUVersionDefinition[]) { 2683 { .version = 1 }, 2684 { 2685 
.version = 2, 2686 .alias = "Haswell-noTSX", 2687 .props = (PropValue[]) { 2688 { "hle", "off" }, 2689 { "rtm", "off" }, 2690 { "stepping", "1" }, 2691 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2692 { /* end of list */ } 2693 }, 2694 }, 2695 { 2696 .version = 3, 2697 .alias = "Haswell-IBRS", 2698 .props = (PropValue[]) { 2699 /* Restore TSX features removed by -v2 above */ 2700 { "hle", "on" }, 2701 { "rtm", "on" }, 2702 /* 2703 * Haswell and Haswell-IBRS had stepping=4 in 2704 * QEMU 4.0 and older 2705 */ 2706 { "stepping", "4" }, 2707 { "spec-ctrl", "on" }, 2708 { "model-id", 2709 "Intel Core Processor (Haswell, IBRS)" }, 2710 { /* end of list */ } 2711 } 2712 }, 2713 { 2714 .version = 4, 2715 .alias = "Haswell-noTSX-IBRS", 2716 .props = (PropValue[]) { 2717 { "hle", "off" }, 2718 { "rtm", "off" }, 2719 /* spec-ctrl was already enabled by -v3 above */ 2720 { "stepping", "1" }, 2721 { "model-id", 2722 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2723 { /* end of list */ } 2724 } 2725 }, 2726 { /* end of list */ } 2727 } 2728 }, 2729 { 2730 .name = "Broadwell", 2731 .level = 0xd, 2732 .vendor = CPUID_VENDOR_INTEL, 2733 .family = 6, 2734 .model = 61, 2735 .stepping = 2, 2736 .features[FEAT_1_EDX] = 2737 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2738 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2739 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2740 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2741 CPUID_DE | CPUID_FP87, 2742 .features[FEAT_1_ECX] = 2743 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2744 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2745 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2746 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2747 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2748 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2749 .features[FEAT_8000_0001_EDX] = 2750 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2751 CPUID_EXT2_SYSCALL, 2752 .features[FEAT_8000_0001_ECX] = 2753 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2754 .features[FEAT_7_0_EBX] = 2755 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2756 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2757 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2758 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2759 CPUID_7_0_EBX_SMAP, 2760 .features[FEAT_XSAVE] = 2761 CPUID_XSAVE_XSAVEOPT, 2762 .features[FEAT_6_EAX] = 2763 CPUID_6_EAX_ARAT, 2764 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2765 MSR_VMX_BASIC_TRUE_CTLS, 2766 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2767 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2768 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2769 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2770 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2771 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2772 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2773 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2774 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2775 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2776 .features[FEAT_VMX_EXIT_CTLS] = 2777 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2778 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2779 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2780 
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2781 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2782 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2783 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2784 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2785 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2786 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2787 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2788 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2789 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2790 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2791 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2792 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2793 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2794 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2795 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2796 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2797 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2798 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2799 .features[FEAT_VMX_SECONDARY_CTLS] = 2800 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2801 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2802 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2803 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2804 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2805 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2806 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2807 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2808 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2809 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2810 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2811 .xlevel = 0x80000008, 2812 .model_id = "Intel Core Processor (Broadwell)", 2813 .versions = (X86CPUVersionDefinition[]) { 2814 { .version = 1 }, 2815 { 2816 .version = 2, 2817 .alias = "Broadwell-noTSX", 2818 .props = (PropValue[]) { 2819 { "hle", "off" }, 2820 { "rtm", "off" }, 2821 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2822 { /* end of list */ } 2823 }, 2824 }, 2825 { 2826 .version = 3, 2827 .alias = "Broadwell-IBRS", 2828 .props = (PropValue[]) { 2829 /* Restore TSX features removed by -v2 above */ 2830 { "hle", "on" }, 2831 { "rtm", "on" }, 2832 { "spec-ctrl", "on" }, 2833 { "model-id", 2834 "Intel Core Processor (Broadwell, IBRS)" }, 2835 { /* end of list */ } 2836 } 2837 }, 2838 { 2839 .version = 4, 2840 .alias = "Broadwell-noTSX-IBRS", 2841 .props = (PropValue[]) { 2842 { "hle", "off" }, 2843 { "rtm", "off" }, 2844 /* spec-ctrl was already enabled by -v3 above */ 2845 { "model-id", 2846 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2847 { /* end of list */ } 2848 } 2849 }, 2850 { /* end of list */ } 2851 } 2852 }, 2853 { 2854 .name = "Skylake-Client", 2855 .level = 0xd, 2856 .vendor = CPUID_VENDOR_INTEL, 2857 .family = 6, 2858 .model = 94, 2859 .stepping = 3, 2860 .features[FEAT_1_EDX] = 2861 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2862 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2863 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2864 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2865 CPUID_DE | CPUID_FP87, 2866 .features[FEAT_1_ECX] = 2867 CPUID_EXT_AVX | CPUID_EXT_XSAVE | 
CPUID_EXT_AES | 2868 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2869 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2870 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2871 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2872 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2873 .features[FEAT_8000_0001_EDX] = 2874 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2875 CPUID_EXT2_SYSCALL, 2876 .features[FEAT_8000_0001_ECX] = 2877 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2878 .features[FEAT_7_0_EBX] = 2879 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2880 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2881 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2882 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2883 CPUID_7_0_EBX_SMAP, 2884 /* Missing: XSAVES (not supported by some Linux versions, 2885 * including v4.1 to v4.12). 2886 * KVM doesn't yet expose any XSAVES state save component, 2887 * and the only one defined in Skylake (processor tracing) 2888 * probably will block migration anyway. 2889 */ 2890 .features[FEAT_XSAVE] = 2891 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2892 CPUID_XSAVE_XGETBV1, 2893 .features[FEAT_6_EAX] = 2894 CPUID_6_EAX_ARAT, 2895 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2896 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2897 MSR_VMX_BASIC_TRUE_CTLS, 2898 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2899 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2900 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2901 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2902 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2903 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2904 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2905 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2906 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2907 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2908 .features[FEAT_VMX_EXIT_CTLS] = 2909 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2910 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2911 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2912 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2913 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2914 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2915 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2916 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2917 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2918 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2919 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2920 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2921 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2922 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2923 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2924 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2925 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2926 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2927 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2928 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2929 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2930 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2931 
.features[FEAT_VMX_SECONDARY_CTLS] = 2932 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2933 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2934 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2935 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2936 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2937 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2938 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2939 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2940 .xlevel = 0x80000008, 2941 .model_id = "Intel Core Processor (Skylake)", 2942 .versions = (X86CPUVersionDefinition[]) { 2943 { .version = 1 }, 2944 { 2945 .version = 2, 2946 .alias = "Skylake-Client-IBRS", 2947 .props = (PropValue[]) { 2948 { "spec-ctrl", "on" }, 2949 { "model-id", 2950 "Intel Core Processor (Skylake, IBRS)" }, 2951 { /* end of list */ } 2952 } 2953 }, 2954 { 2955 .version = 3, 2956 .alias = "Skylake-Client-noTSX-IBRS", 2957 .props = (PropValue[]) { 2958 { "hle", "off" }, 2959 { "rtm", "off" }, 2960 { "model-id", 2961 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2962 { /* end of list */ } 2963 } 2964 }, 2965 { /* end of list */ } 2966 } 2967 }, 2968 { 2969 .name = "Skylake-Server", 2970 .level = 0xd, 2971 .vendor = CPUID_VENDOR_INTEL, 2972 .family = 6, 2973 .model = 85, 2974 .stepping = 4, 2975 .features[FEAT_1_EDX] = 2976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2980 CPUID_DE | CPUID_FP87, 2981 .features[FEAT_1_ECX] = 2982 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2983 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2984 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2985 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2986 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2987 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2988 .features[FEAT_8000_0001_EDX] = 2989 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2990 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2991 .features[FEAT_8000_0001_ECX] = 2992 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2993 .features[FEAT_7_0_EBX] = 2994 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2995 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2996 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2997 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2998 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2999 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3000 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3001 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3002 .features[FEAT_7_0_ECX] = 3003 CPUID_7_0_ECX_PKU, 3004 /* Missing: XSAVES (not supported by some Linux versions, 3005 * including v4.1 to v4.12). 3006 * KVM doesn't yet expose any XSAVES state save component, 3007 * and the only one defined in Skylake (processor tracing) 3008 * probably will block migration anyway. 
3009 */ 3010 .features[FEAT_XSAVE] = 3011 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3012 CPUID_XSAVE_XGETBV1, 3013 .features[FEAT_6_EAX] = 3014 CPUID_6_EAX_ARAT, 3015 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3016 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3017 MSR_VMX_BASIC_TRUE_CTLS, 3018 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3019 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3020 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3021 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3022 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3023 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3024 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3025 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3026 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3027 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3028 .features[FEAT_VMX_EXIT_CTLS] = 3029 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3030 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3031 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3032 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3033 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3034 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3035 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3036 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3037 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3038 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3039 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3040 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3041 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3042 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3043 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3044 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3045 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3046 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3047 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3048 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3049 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3050 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3051 .features[FEAT_VMX_SECONDARY_CTLS] = 3052 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3053 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3054 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3055 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3056 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3057 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3058 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3059 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3060 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3061 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3062 .xlevel = 0x80000008, 3063 .model_id = "Intel Xeon Processor (Skylake)", 3064 .versions = (X86CPUVersionDefinition[]) { 3065 { .version = 1 }, 3066 { 3067 .version = 2, 3068 .alias = "Skylake-Server-IBRS", 3069 .props = (PropValue[]) { 3070 /* clflushopt was not added to Skylake-Server-IBRS */ 3071 /* TODO: add -v3 including clflushopt */ 3072 { "clflushopt", "off" }, 3073 { "spec-ctrl", "on" }, 3074 { "model-id", 3075 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3076 { /* end of list */ } 3077 } 3078 }, 3079 { 3080 .version = 3, 3081 .alias = "Skylake-Server-noTSX-IBRS", 3082 .props = (PropValue[]) { 3083 { "hle", "off" }, 3084 { "rtm", "off" }, 3085 { "model-id", 3086 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3087 { /* end of list */ } 3088 } 3089 }, 3090 { /* end of list */ } 3091 } 3092 }, 3093 { 3094 .name = "Cascadelake-Server", 3095 .level = 0xd, 3096 .vendor = CPUID_VENDOR_INTEL, 3097 .family = 6, 3098 .model = 85, 3099 .stepping = 6, 3100 .features[FEAT_1_EDX] = 3101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3105 CPUID_DE | CPUID_FP87, 3106 .features[FEAT_1_ECX] = 3107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3108 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3109 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3110 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3111 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3112 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3113 .features[FEAT_8000_0001_EDX] = 3114 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3115 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3116 .features[FEAT_8000_0001_ECX] = 3117 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3118 .features[FEAT_7_0_EBX] = 3119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3120 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3121 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3122 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3123 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3124 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3125 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3126 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3127 .features[FEAT_7_0_ECX] = 3128 CPUID_7_0_ECX_PKU | 3129 CPUID_7_0_ECX_AVX512VNNI, 3130 .features[FEAT_7_0_EDX] = 3131 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3132 /* Missing: XSAVES (not supported by some Linux versions, 3133 * including v4.1 to v4.12). 3134 * KVM doesn't yet expose any XSAVES state save component, 3135 * and the only one defined in Skylake (processor tracing) 3136 * probably will block migration anyway. 
3137 */ 3138 .features[FEAT_XSAVE] = 3139 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3140 CPUID_XSAVE_XGETBV1, 3141 .features[FEAT_6_EAX] = 3142 CPUID_6_EAX_ARAT, 3143 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3144 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3145 MSR_VMX_BASIC_TRUE_CTLS, 3146 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3147 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3148 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3149 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3150 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3151 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3152 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3153 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3154 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3155 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3156 .features[FEAT_VMX_EXIT_CTLS] = 3157 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3158 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3159 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3160 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3161 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3162 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3163 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3164 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3165 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3166 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3167 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3168 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3169 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3170 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3171 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3172 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3173 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3174 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3175 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3176 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3177 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3178 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3179 .features[FEAT_VMX_SECONDARY_CTLS] = 3180 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3181 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3182 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3183 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3184 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3185 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3186 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3187 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3188 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3189 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3190 .xlevel = 0x80000008, 3191 .model_id = "Intel Xeon Processor (Cascadelake)", 3192 .versions = (X86CPUVersionDefinition[]) { 3193 { .version = 1 }, 3194 { .version = 2, 3195 .props = (PropValue[]) { 3196 { "arch-capabilities", "on" }, 3197 { "rdctl-no", "on" }, 3198 { "ibrs-all", "on" }, 3199 { "skip-l1dfl-vmentry", "on" }, 3200 { "mds-no", "on" }, 3201 { /* end of list */ } 3202 }, 3203 }, 3204 { .version = 3, 3205 .alias = 
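/*
 * Background (illustrative): every entry in a .versions array defines an
 * extra CPU model, "<name>-v<N>", optionally also reachable under its
 * .alias name (here "Cascadelake-Server-noTSX" for v3).  The PropValue
 * lists are applied on top of the base definition in version order, so v3
 * is the v2 model with "hle" and "rtm" additionally turned off.  The sketch
 * below (not compiled) shows roughly how such a list can be applied to a
 * CPU object; apply_props_sketch() is not a function in this file, and it
 * assumes the object_property_parse(obj, value-string, prop-name, errp)
 * argument order of this era.
 */
#if 0
static void apply_props_sketch(X86CPU *cpu, const PropValue *props)
{
    const PropValue *p;

    for (p = props; p && p->prop; p++) {
        /* e.g. { "hle", "off" } switches the "hle" property off */
        object_property_parse(OBJECT(cpu), p->value, p->prop, &error_abort);
    }
}
#endif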
"Cascadelake-Server-noTSX", 3206 .props = (PropValue[]) { 3207 { "hle", "off" }, 3208 { "rtm", "off" }, 3209 { /* end of list */ } 3210 }, 3211 }, 3212 { /* end of list */ } 3213 } 3214 }, 3215 { 3216 .name = "Cooperlake", 3217 .level = 0xd, 3218 .vendor = CPUID_VENDOR_INTEL, 3219 .family = 6, 3220 .model = 85, 3221 .stepping = 10, 3222 .features[FEAT_1_EDX] = 3223 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3224 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3225 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3226 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3227 CPUID_DE | CPUID_FP87, 3228 .features[FEAT_1_ECX] = 3229 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3230 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3231 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3232 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3233 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3234 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3235 .features[FEAT_8000_0001_EDX] = 3236 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3237 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3238 .features[FEAT_8000_0001_ECX] = 3239 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3240 .features[FEAT_7_0_EBX] = 3241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3242 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3244 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3245 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3246 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3247 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3248 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3249 .features[FEAT_7_0_ECX] = 3250 CPUID_7_0_ECX_PKU | 3251 CPUID_7_0_ECX_AVX512VNNI, 3252 .features[FEAT_7_0_EDX] = 3253 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3254 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3255 .features[FEAT_ARCH_CAPABILITIES] = 3256 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3257 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3258 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3259 .features[FEAT_7_1_EAX] = 3260 CPUID_7_1_EAX_AVX512_BF16, 3261 /* 3262 * Missing: XSAVES (not supported by some Linux versions, 3263 * including v4.1 to v4.12). 3264 * KVM doesn't yet expose any XSAVES state save component, 3265 * and the only one defined in Skylake (processor tracing) 3266 * probably will block migration anyway. 
3267 */ 3268 .features[FEAT_XSAVE] = 3269 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3270 CPUID_XSAVE_XGETBV1, 3271 .features[FEAT_6_EAX] = 3272 CPUID_6_EAX_ARAT, 3273 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3274 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3275 MSR_VMX_BASIC_TRUE_CTLS, 3276 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3277 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3278 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3279 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3280 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3281 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3282 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3283 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3284 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3285 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3286 .features[FEAT_VMX_EXIT_CTLS] = 3287 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3288 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3289 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3290 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3291 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3292 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3293 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3294 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3295 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3296 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3297 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3298 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3299 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3300 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3301 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3302 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3303 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3304 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3305 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3306 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3307 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3308 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3309 .features[FEAT_VMX_SECONDARY_CTLS] = 3310 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3311 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3312 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3313 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3314 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3315 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3316 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3317 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3318 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3319 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3320 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3321 .xlevel = 0x80000008, 3322 .model_id = "Intel Xeon Processor (Cooperlake)", 3323 }, 3324 { 3325 .name = "Icelake-Client", 3326 .level = 0xd, 3327 .vendor = CPUID_VENDOR_INTEL, 3328 .family = 6, 3329 .model = 126, 3330 .stepping = 0, 3331 .features[FEAT_1_EDX] = 3332 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3333 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
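/*
 * Background (illustrative): CPUID leaf 7 is sub-leaf indexed, so the
 * FEAT_7_0_* words in these definitions are returned for ECX=0 while
 * FEAT_7_1_EAX (e.g. AVX512_BF16 in the Cooperlake definition above) is
 * returned for ECX=1.  The sketch below (not compiled) shows the
 * guest-visible check; host_cpuid() is the raw-CPUID helper used later in
 * this file, the function itself is only a sketch.
 */
#if 0
static bool guest_has_avx512_bf16(void)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(7, 1, &eax, &ebx, &ecx, &edx);   /* leaf 7, sub-leaf 1 */
    return eax & CPUID_7_1_EAX_AVX512_BF16;     /* bit 5 of EAX */
}
#endif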
CPUID_CMOV | CPUID_MCA | 3334 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3335 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3336 CPUID_DE | CPUID_FP87, 3337 .features[FEAT_1_ECX] = 3338 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3339 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3340 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3341 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3342 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3343 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3344 .features[FEAT_8000_0001_EDX] = 3345 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3346 CPUID_EXT2_SYSCALL, 3347 .features[FEAT_8000_0001_ECX] = 3348 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3349 .features[FEAT_8000_0008_EBX] = 3350 CPUID_8000_0008_EBX_WBNOINVD, 3351 .features[FEAT_7_0_EBX] = 3352 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3353 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3354 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3355 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3356 CPUID_7_0_EBX_SMAP, 3357 .features[FEAT_7_0_ECX] = 3358 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3359 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3360 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3361 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3362 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3363 .features[FEAT_7_0_EDX] = 3364 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3365 /* Missing: XSAVES (not supported by some Linux versions, 3366 * including v4.1 to v4.12). 3367 * KVM doesn't yet expose any XSAVES state save component, 3368 * and the only one defined in Skylake (processor tracing) 3369 * probably will block migration anyway. 
3370 */ 3371 .features[FEAT_XSAVE] = 3372 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3373 CPUID_XSAVE_XGETBV1, 3374 .features[FEAT_6_EAX] = 3375 CPUID_6_EAX_ARAT, 3376 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3377 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3378 MSR_VMX_BASIC_TRUE_CTLS, 3379 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3380 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3381 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3382 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3383 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3384 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3385 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3386 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3387 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3388 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3389 .features[FEAT_VMX_EXIT_CTLS] = 3390 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3391 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3392 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3393 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3394 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3395 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3396 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3397 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3398 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3399 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3400 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3401 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3402 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3403 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3404 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3405 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3406 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3407 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3408 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3409 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3410 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3411 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3412 .features[FEAT_VMX_SECONDARY_CTLS] = 3413 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3414 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3415 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3416 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3417 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3418 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3419 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3420 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3421 .xlevel = 0x80000008, 3422 .model_id = "Intel Core Processor (Icelake)", 3423 .versions = (X86CPUVersionDefinition[]) { 3424 { .version = 1 }, 3425 { 3426 .version = 2, 3427 .alias = "Icelake-Client-noTSX", 3428 .props = (PropValue[]) { 3429 { "hle", "off" }, 3430 { "rtm", "off" }, 3431 { /* end of list */ } 3432 }, 3433 }, 3434 { /* end of list */ } 3435 } 3436 }, 3437 { 3438 .name = "Icelake-Server", 3439 .level = 0xd, 3440 .vendor = CPUID_VENDOR_INTEL, 3441 .family = 6, 3442 .model = 134, 3443 .stepping = 0, 3444 .features[FEAT_1_EDX] = 3445 
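/*
 * Note: .level and .xlevel seed the maximum basic and extended CPUID leaves
 * the guest sees (CPUID[0].EAX and CPUID[0x80000000].EAX); they may still be
 * raised at realize time if enabled features need higher leaves.  level 0xd
 * keeps leaf 0xD (XSAVE state enumeration) visible, and xlevel 0x80000008
 * exposes address sizes plus the FEAT_8000_0008_EBX flags used above.
 */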
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3446 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3447 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3448 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3449 CPUID_DE | CPUID_FP87, 3450 .features[FEAT_1_ECX] = 3451 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3452 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3453 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3454 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3455 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3456 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3457 .features[FEAT_8000_0001_EDX] = 3458 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3459 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3460 .features[FEAT_8000_0001_ECX] = 3461 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3462 .features[FEAT_8000_0008_EBX] = 3463 CPUID_8000_0008_EBX_WBNOINVD, 3464 .features[FEAT_7_0_EBX] = 3465 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3466 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3467 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3468 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3469 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3470 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3471 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3472 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3473 .features[FEAT_7_0_ECX] = 3474 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3475 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3476 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3477 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3478 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3479 .features[FEAT_7_0_EDX] = 3480 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3481 /* Missing: XSAVES (not supported by some Linux versions, 3482 * including v4.1 to v4.12). 3483 * KVM doesn't yet expose any XSAVES state save component, 3484 * and the only one defined in Skylake (processor tracing) 3485 * probably will block migration anyway. 
3486 */ 3487 .features[FEAT_XSAVE] = 3488 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3489 CPUID_XSAVE_XGETBV1, 3490 .features[FEAT_6_EAX] = 3491 CPUID_6_EAX_ARAT, 3492 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3493 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3494 MSR_VMX_BASIC_TRUE_CTLS, 3495 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3496 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3497 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3498 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3499 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3500 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3501 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3502 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3503 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3504 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3505 .features[FEAT_VMX_EXIT_CTLS] = 3506 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3507 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3508 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3509 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3510 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3511 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3512 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3513 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3514 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3515 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3516 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3517 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3518 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3519 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3520 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3521 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3522 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3523 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3524 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3525 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3526 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3527 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3528 .features[FEAT_VMX_SECONDARY_CTLS] = 3529 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3530 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3531 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3532 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3533 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3534 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3535 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3536 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3537 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3538 .xlevel = 0x80000008, 3539 .model_id = "Intel Xeon Processor (Icelake)", 3540 .versions = (X86CPUVersionDefinition[]) { 3541 { .version = 1 }, 3542 { 3543 .version = 2, 3544 .alias = "Icelake-Server-noTSX", 3545 .props = (PropValue[]) { 3546 { "hle", "off" }, 3547 { "rtm", "off" }, 3548 { /* end of list */ } 3549 }, 3550 }, 3551 { /* end of list */ } 3552 } 3553 }, 3554 { 3555 .name = "Denverton", 3556 .level = 21, 3557 .vendor = CPUID_VENDOR_INTEL, 3558 .family = 6, 3559 .model = 95, 3560 .stepping = 1, 3561 
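/*
 * Background (illustrative): the .family/.model/.stepping fields above are
 * packed into CPUID.1.EAX.  The sketch below (not compiled) mirrors the
 * x86_cpuid_version_set_family()/_set_model()/_set_stepping() helpers later
 * in this file; encode_cpuid_version() itself is not a function in this
 * file.  For Denverton (family 6, model 95, stepping 1) it yields 0x000506F1.
 */
#if 0
static uint32_t encode_cpuid_version(unsigned family, unsigned model,
                                     unsigned stepping)
{
    uint32_t eax = stepping & 0xf;              /* bits 3:0  */

    if (family > 0x0f) {
        eax |= 0xf00 | ((family - 0x0f) << 20); /* extended family */
    } else {
        eax |= family << 8;                     /* bits 11:8 */
    }
    eax |= (model & 0xf) << 4;                  /* bits 7:4  */
    eax |= (model >> 4) << 16;                  /* extended model, bits 19:16 */
    return eax;
}
#endif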
.features[FEAT_1_EDX] = 3562 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3563 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3564 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3565 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3566 CPUID_SSE | CPUID_SSE2, 3567 .features[FEAT_1_ECX] = 3568 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3569 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3570 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3571 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3572 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3573 .features[FEAT_8000_0001_EDX] = 3574 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3575 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3576 .features[FEAT_8000_0001_ECX] = 3577 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3578 .features[FEAT_7_0_EBX] = 3579 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3580 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3581 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3582 .features[FEAT_7_0_EDX] = 3583 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3584 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3585 /* 3586 * Missing: XSAVES (not supported by some Linux versions, 3587 * including v4.1 to v4.12). 3588 * KVM doesn't yet expose any XSAVES state save component, 3589 * and the only one defined in Skylake (processor tracing) 3590 * probably will block migration anyway. 3591 */ 3592 .features[FEAT_XSAVE] = 3593 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3594 .features[FEAT_6_EAX] = 3595 CPUID_6_EAX_ARAT, 3596 .features[FEAT_ARCH_CAPABILITIES] = 3597 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3598 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3599 MSR_VMX_BASIC_TRUE_CTLS, 3600 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3601 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3602 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3603 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3604 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3605 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3606 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3607 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3608 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3609 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3610 .features[FEAT_VMX_EXIT_CTLS] = 3611 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3612 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3613 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3614 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3615 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3616 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3617 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3618 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3619 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3620 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3621 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3622 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3623 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3624 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3625 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3626 VMX_CPU_BASED_TPR_SHADOW | 
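/*
 * Background (illustrative): the FEAT_VMX_* words in these definitions are
 * surfaced to a nested hypervisor through the IA32_VMX_* capability MSRs;
 * for the pin-/processor-based controls the high 32 bits of the MSR hold
 * the "allowed-1" settings.  In the sketch below (not compiled), rdmsr() is
 * only a stand-in for the guest's own MSR accessor and 0x48e is
 * IA32_VMX_TRUE_PROCBASED_CTLS.
 */
#if 0
static bool nested_hv_can_use_msr_bitmaps(void)
{
    uint64_t cap = rdmsr(0x48e);        /* IA32_VMX_TRUE_PROCBASED_CTLS */

    return (cap >> 32) & VMX_CPU_BASED_USE_MSR_BITMAPS;
}
#endif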
VMX_CPU_BASED_MOV_DR_EXITING | 3627 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3628 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3629 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3630 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3631 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3632 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3633 .features[FEAT_VMX_SECONDARY_CTLS] = 3634 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3635 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3636 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3637 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3638 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3639 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3640 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3641 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3642 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3643 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3644 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3645 .xlevel = 0x80000008, 3646 .model_id = "Intel Atom Processor (Denverton)", 3647 .versions = (X86CPUVersionDefinition[]) { 3648 { .version = 1 }, 3649 { 3650 .version = 2, 3651 .props = (PropValue[]) { 3652 { "monitor", "off" }, 3653 { "mpx", "off" }, 3654 { /* end of list */ }, 3655 }, 3656 }, 3657 { /* end of list */ }, 3658 }, 3659 }, 3660 { 3661 .name = "Snowridge", 3662 .level = 27, 3663 .vendor = CPUID_VENDOR_INTEL, 3664 .family = 6, 3665 .model = 134, 3666 .stepping = 1, 3667 .features[FEAT_1_EDX] = 3668 /* missing: CPUID_PN CPUID_IA64 */ 3669 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3670 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3671 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3672 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3673 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3674 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3675 CPUID_MMX | 3676 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3677 .features[FEAT_1_ECX] = 3678 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3679 CPUID_EXT_SSSE3 | 3680 CPUID_EXT_CX16 | 3681 CPUID_EXT_SSE41 | 3682 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3683 CPUID_EXT_POPCNT | 3684 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3685 CPUID_EXT_RDRAND, 3686 .features[FEAT_8000_0001_EDX] = 3687 CPUID_EXT2_SYSCALL | 3688 CPUID_EXT2_NX | 3689 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3690 CPUID_EXT2_LM, 3691 .features[FEAT_8000_0001_ECX] = 3692 CPUID_EXT3_LAHF_LM | 3693 CPUID_EXT3_3DNOWPREFETCH, 3694 .features[FEAT_7_0_EBX] = 3695 CPUID_7_0_EBX_FSGSBASE | 3696 CPUID_7_0_EBX_SMEP | 3697 CPUID_7_0_EBX_ERMS | 3698 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3699 CPUID_7_0_EBX_RDSEED | 3700 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3701 CPUID_7_0_EBX_CLWB | 3702 CPUID_7_0_EBX_SHA_NI, 3703 .features[FEAT_7_0_ECX] = 3704 CPUID_7_0_ECX_UMIP | 3705 /* missing bit 5 */ 3706 CPUID_7_0_ECX_GFNI | 3707 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3708 CPUID_7_0_ECX_MOVDIR64B, 3709 .features[FEAT_7_0_EDX] = 3710 CPUID_7_0_EDX_SPEC_CTRL | 3711 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3712 CPUID_7_0_EDX_CORE_CAPABILITY, 3713 .features[FEAT_CORE_CAPABILITY] = 3714 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3715 /* 3716 * Missing: XSAVES (not supported by some Linux versions, 3717 * including v4.1 to v4.12). 
3718 * KVM doesn't yet expose any XSAVES state save component, 3719 * and the only one defined in Skylake (processor tracing) 3720 * probably will block migration anyway. 3721 */ 3722 .features[FEAT_XSAVE] = 3723 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3724 CPUID_XSAVE_XGETBV1, 3725 .features[FEAT_6_EAX] = 3726 CPUID_6_EAX_ARAT, 3727 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3728 MSR_VMX_BASIC_TRUE_CTLS, 3729 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3730 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3731 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3732 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3733 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3734 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3735 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3736 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3737 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3738 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3739 .features[FEAT_VMX_EXIT_CTLS] = 3740 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3741 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3742 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3743 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3744 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3745 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3746 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3747 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3748 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3749 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3750 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3751 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3752 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3753 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3754 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3755 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3756 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3757 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3758 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3759 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3760 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3761 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3762 .features[FEAT_VMX_SECONDARY_CTLS] = 3763 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3764 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3765 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3766 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3767 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3768 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3769 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3770 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3771 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3772 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3773 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3774 .xlevel = 0x80000008, 3775 .model_id = "Intel Atom Processor (SnowRidge)", 3776 .versions = (X86CPUVersionDefinition[]) { 3777 { .version = 1 }, 3778 { 3779 .version = 2, 3780 .props = (PropValue[]) { 3781 { "mpx", "off" }, 3782 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3783 { /* 
end of list */ }, 3784 }, 3785 }, 3786 { /* end of list */ }, 3787 }, 3788 }, 3789 { 3790 .name = "KnightsMill", 3791 .level = 0xd, 3792 .vendor = CPUID_VENDOR_INTEL, 3793 .family = 6, 3794 .model = 133, 3795 .stepping = 0, 3796 .features[FEAT_1_EDX] = 3797 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3798 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3799 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3800 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3801 CPUID_PSE | CPUID_DE | CPUID_FP87, 3802 .features[FEAT_1_ECX] = 3803 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3804 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3805 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3806 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3807 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3808 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3809 .features[FEAT_8000_0001_EDX] = 3810 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3811 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3812 .features[FEAT_8000_0001_ECX] = 3813 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3814 .features[FEAT_7_0_EBX] = 3815 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3816 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3817 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3818 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3819 CPUID_7_0_EBX_AVX512ER, 3820 .features[FEAT_7_0_ECX] = 3821 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3822 .features[FEAT_7_0_EDX] = 3823 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3824 .features[FEAT_XSAVE] = 3825 CPUID_XSAVE_XSAVEOPT, 3826 .features[FEAT_6_EAX] = 3827 CPUID_6_EAX_ARAT, 3828 .xlevel = 0x80000008, 3829 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3830 }, 3831 { 3832 .name = "Opteron_G1", 3833 .level = 5, 3834 .vendor = CPUID_VENDOR_AMD, 3835 .family = 15, 3836 .model = 6, 3837 .stepping = 1, 3838 .features[FEAT_1_EDX] = 3839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3843 CPUID_DE | CPUID_FP87, 3844 .features[FEAT_1_ECX] = 3845 CPUID_EXT_SSE3, 3846 .features[FEAT_8000_0001_EDX] = 3847 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3848 .xlevel = 0x80000008, 3849 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3850 }, 3851 { 3852 .name = "Opteron_G2", 3853 .level = 5, 3854 .vendor = CPUID_VENDOR_AMD, 3855 .family = 15, 3856 .model = 6, 3857 .stepping = 1, 3858 .features[FEAT_1_EDX] = 3859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3863 CPUID_DE | CPUID_FP87, 3864 .features[FEAT_1_ECX] = 3865 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3866 .features[FEAT_8000_0001_EDX] = 3867 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3868 .features[FEAT_8000_0001_ECX] = 3869 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3870 .xlevel = 0x80000008, 3871 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3872 }, 3873 { 3874 .name = "Opteron_G3", 3875 .level = 5, 3876 .vendor = CPUID_VENDOR_AMD, 3877 .family = 16, 3878 .model = 2, 3879 .stepping = 3, 3880 .features[FEAT_1_EDX] = 3881 CPUID_VME | 
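/*
 * Background (illustrative): the CPUID_EXT2_* flags in the
 * FEAT_8000_0001_EDX words of these Opteron definitions are bits of CPUID
 * leaf 0x80000001 EDX, which is where long mode, NX and SYSCALL are
 * reported (not in leaf 1).  The sketch below (not compiled) shows the
 * guest-visible check; host_cpuid() is the helper used later in this file,
 * the function itself is only a sketch.
 */
#if 0
static bool guest_sees_long_mode(void)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    return edx & CPUID_EXT2_LM;                 /* bit 29 */
}
#endif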
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3882 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3883 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3884 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3885 CPUID_DE | CPUID_FP87, 3886 .features[FEAT_1_ECX] = 3887 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3888 CPUID_EXT_SSE3, 3889 .features[FEAT_8000_0001_EDX] = 3890 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3891 CPUID_EXT2_RDTSCP, 3892 .features[FEAT_8000_0001_ECX] = 3893 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3894 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3895 .xlevel = 0x80000008, 3896 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3897 }, 3898 { 3899 .name = "Opteron_G4", 3900 .level = 0xd, 3901 .vendor = CPUID_VENDOR_AMD, 3902 .family = 21, 3903 .model = 1, 3904 .stepping = 2, 3905 .features[FEAT_1_EDX] = 3906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3910 CPUID_DE | CPUID_FP87, 3911 .features[FEAT_1_ECX] = 3912 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3913 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3914 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3915 CPUID_EXT_SSE3, 3916 .features[FEAT_8000_0001_EDX] = 3917 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3918 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3919 .features[FEAT_8000_0001_ECX] = 3920 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3921 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3922 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3923 CPUID_EXT3_LAHF_LM, 3924 .features[FEAT_SVM] = 3925 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3926 /* no xsaveopt! */ 3927 .xlevel = 0x8000001A, 3928 .model_id = "AMD Opteron 62xx class CPU", 3929 }, 3930 { 3931 .name = "Opteron_G5", 3932 .level = 0xd, 3933 .vendor = CPUID_VENDOR_AMD, 3934 .family = 21, 3935 .model = 2, 3936 .stepping = 0, 3937 .features[FEAT_1_EDX] = 3938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3942 CPUID_DE | CPUID_FP87, 3943 .features[FEAT_1_ECX] = 3944 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3945 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3946 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3947 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3948 .features[FEAT_8000_0001_EDX] = 3949 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3950 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3951 .features[FEAT_8000_0001_ECX] = 3952 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3953 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3954 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3955 CPUID_EXT3_LAHF_LM, 3956 .features[FEAT_SVM] = 3957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3958 /* no xsaveopt! 
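(i.e. FEAT_XSAVE, CPUID leaf 0xD sub-leaf 1 EAX, is deliberately left empty
for this model, unlike the Intel definitions above)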
*/ 3959 .xlevel = 0x8000001A, 3960 .model_id = "AMD Opteron 63xx class CPU", 3961 }, 3962 { 3963 .name = "EPYC", 3964 .level = 0xd, 3965 .vendor = CPUID_VENDOR_AMD, 3966 .family = 23, 3967 .model = 1, 3968 .stepping = 2, 3969 .features[FEAT_1_EDX] = 3970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3974 CPUID_VME | CPUID_FP87, 3975 .features[FEAT_1_ECX] = 3976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3977 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3980 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3981 .features[FEAT_8000_0001_EDX] = 3982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3984 CPUID_EXT2_SYSCALL, 3985 .features[FEAT_8000_0001_ECX] = 3986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3989 CPUID_EXT3_TOPOEXT, 3990 .features[FEAT_7_0_EBX] = 3991 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3992 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3993 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3994 CPUID_7_0_EBX_SHA_NI, 3995 .features[FEAT_XSAVE] = 3996 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3997 CPUID_XSAVE_XGETBV1, 3998 .features[FEAT_6_EAX] = 3999 CPUID_6_EAX_ARAT, 4000 .features[FEAT_SVM] = 4001 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4002 .xlevel = 0x8000001E, 4003 .model_id = "AMD EPYC Processor", 4004 .cache_info = &epyc_cache_info, 4005 .versions = (X86CPUVersionDefinition[]) { 4006 { .version = 1 }, 4007 { 4008 .version = 2, 4009 .alias = "EPYC-IBPB", 4010 .props = (PropValue[]) { 4011 { "ibpb", "on" }, 4012 { "model-id", 4013 "AMD EPYC Processor (with IBPB)" }, 4014 { /* end of list */ } 4015 } 4016 }, 4017 { 4018 .version = 3, 4019 .props = (PropValue[]) { 4020 { "ibpb", "on" }, 4021 { "perfctr-core", "on" }, 4022 { "clzero", "on" }, 4023 { "xsaveerptr", "on" }, 4024 { "xsaves", "on" }, 4025 { "model-id", 4026 "AMD EPYC Processor" }, 4027 { /* end of list */ } 4028 } 4029 }, 4030 { /* end of list */ } 4031 } 4032 }, 4033 { 4034 .name = "Dhyana", 4035 .level = 0xd, 4036 .vendor = CPUID_VENDOR_HYGON, 4037 .family = 24, 4038 .model = 0, 4039 .stepping = 1, 4040 .features[FEAT_1_EDX] = 4041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4045 CPUID_VME | CPUID_FP87, 4046 .features[FEAT_1_ECX] = 4047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4052 .features[FEAT_8000_0001_EDX] = 4053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4055 CPUID_EXT2_SYSCALL, 4056 .features[FEAT_8000_0001_ECX] = 4057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4058 CPUID_EXT3_MISALIGNSSE | 
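/*
 * Background (illustrative): CPUID_VENDOR_HYGON ("HygonGenuine") above is a
 * 12-character vendor string that ends up in the three CPUID leaf-0
 * registers EBX, EDX, ECX ("Hygo" -> 0x6F677948, "nGen" -> 0x6E65476E,
 * "uine" -> 0x656E6975).  The sketch below (not compiled) packs a vendor
 * string the same way x86_cpuid_set_vendor() does further down in this
 * file; pack_vendor() itself is not a function in this file.
 */
#if 0
static void pack_vendor(const char *v, uint32_t *ebx, uint32_t *edx,
                        uint32_t *ecx)
{
    int i;

    *ebx = *edx = *ecx = 0;
    for (i = 0; i < 4; i++) {
        *ebx |= (uint8_t)v[i]     << (8 * i);
        *edx |= (uint8_t)v[i + 4] << (8 * i);
        *ecx |= (uint8_t)v[i + 8] << (8 * i);
    }
}
#endif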
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4060 CPUID_EXT3_TOPOEXT, 4061 .features[FEAT_8000_0008_EBX] = 4062 CPUID_8000_0008_EBX_IBPB, 4063 .features[FEAT_7_0_EBX] = 4064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4067 /* 4068 * Missing: XSAVES (not supported by some Linux versions, 4069 * including v4.1 to v4.12). 4070 * KVM doesn't yet expose any XSAVES state save component. 4071 */ 4072 .features[FEAT_XSAVE] = 4073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4074 CPUID_XSAVE_XGETBV1, 4075 .features[FEAT_6_EAX] = 4076 CPUID_6_EAX_ARAT, 4077 .features[FEAT_SVM] = 4078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4079 .xlevel = 0x8000001E, 4080 .model_id = "Hygon Dhyana Processor", 4081 .cache_info = &epyc_cache_info, 4082 }, 4083 { 4084 .name = "EPYC-Rome", 4085 .level = 0xd, 4086 .vendor = CPUID_VENDOR_AMD, 4087 .family = 23, 4088 .model = 49, 4089 .stepping = 0, 4090 .features[FEAT_1_EDX] = 4091 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4092 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4093 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4094 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4095 CPUID_VME | CPUID_FP87, 4096 .features[FEAT_1_ECX] = 4097 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4098 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4099 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4100 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4101 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4102 .features[FEAT_8000_0001_EDX] = 4103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4104 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4105 CPUID_EXT2_SYSCALL, 4106 .features[FEAT_8000_0001_ECX] = 4107 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4108 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4109 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4110 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4111 .features[FEAT_8000_0008_EBX] = 4112 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4113 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4114 CPUID_8000_0008_EBX_STIBP, 4115 .features[FEAT_7_0_EBX] = 4116 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4117 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4118 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4119 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4120 .features[FEAT_7_0_ECX] = 4121 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4122 .features[FEAT_XSAVE] = 4123 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4124 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4125 .features[FEAT_6_EAX] = 4126 CPUID_6_EAX_ARAT, 4127 .features[FEAT_SVM] = 4128 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4129 .xlevel = 0x8000001E, 4130 .model_id = "AMD EPYC-Rome Processor", 4131 .cache_info = &epyc_rome_cache_info, 4132 }, 4133 }; 4134 4135 /* KVM-specific features that are automatically added/removed 4136 * from all CPU models when KVM is enabled. 
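 * Individual entries can be overridden (e.g. by machine-type compat code)
 * via x86_cpu_change_kvm_default() below, which asserts that the property
 * is already present in this table.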
4137 */ 4138 static PropValue kvm_default_props[] = { 4139 { "kvmclock", "on" }, 4140 { "kvm-nopiodelay", "on" }, 4141 { "kvm-asyncpf", "on" }, 4142 { "kvm-steal-time", "on" }, 4143 { "kvm-pv-eoi", "on" }, 4144 { "kvmclock-stable-bit", "on" }, 4145 { "x2apic", "on" }, 4146 { "acpi", "off" }, 4147 { "monitor", "off" }, 4148 { "svm", "off" }, 4149 { NULL, NULL }, 4150 }; 4151 4152 /* TCG-specific defaults that override all CPU models when using TCG 4153 */ 4154 static PropValue tcg_default_props[] = { 4155 { "vme", "off" }, 4156 { NULL, NULL }, 4157 }; 4158 4159 4160 /* 4161 * We resolve CPU model aliases using -v1 when using "-machine 4162 * none", but this is just for compatibility while libvirt isn't 4163 * adapted to resolve CPU model versions before creating VMs. 4164 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi. 4165 */ 4166 X86CPUVersion default_cpu_version = 1; 4167 4168 void x86_cpu_set_default_version(X86CPUVersion version) 4169 { 4170 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4171 assert(version != CPU_VERSION_AUTO); 4172 default_cpu_version = version; 4173 } 4174 4175 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4176 { 4177 int v = 0; 4178 const X86CPUVersionDefinition *vdef = 4179 x86_cpu_def_get_versions(model->cpudef); 4180 while (vdef->version) { 4181 v = vdef->version; 4182 vdef++; 4183 } 4184 return v; 4185 } 4186 4187 /* Return the actual version being used for a specific CPU model */ 4188 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4189 { 4190 X86CPUVersion v = model->version; 4191 if (v == CPU_VERSION_AUTO) { 4192 v = default_cpu_version; 4193 } 4194 if (v == CPU_VERSION_LATEST) { 4195 return x86_cpu_model_last_version(model); 4196 } 4197 return v; 4198 } 4199 4200 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4201 { 4202 PropValue *pv; 4203 for (pv = kvm_default_props; pv->prop; pv++) { 4204 if (!strcmp(pv->prop, prop)) { 4205 pv->value = value; 4206 break; 4207 } 4208 } 4209 4210 /* It is valid to call this function only for properties that 4211 * are already present in the kvm_default_props table. 4212 */ 4213 assert(pv->prop); 4214 } 4215 4216 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 4217 bool migratable_only); 4218 4219 static bool lmce_supported(void) 4220 { 4221 uint64_t mce_cap = 0; 4222 4223 #ifdef CONFIG_KVM 4224 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4225 return false; 4226 } 4227 #endif 4228 4229 return !!(mce_cap & MCG_LMCE_P); 4230 } 4231 4232 #define CPUID_MODEL_ID_SZ 48 4233 4234 /** 4235 * cpu_x86_fill_model_id: 4236 * Get CPUID model ID string from host CPU. 4237 * 4238 * @str should have at least CPUID_MODEL_ID_SZ bytes 4239 * 4240 * The function does NOT add a null terminator to the string 4241 * automatically. 
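 * Callers are expected to pass a zero-initialized buffer of at least
 * CPUID_MODEL_ID_SZ + 1 bytes (as max_x86_cpu_initfn() below does) so that
 * the result ends up NUL-terminated.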
4242 */ 4243 static int cpu_x86_fill_model_id(char *str) 4244 { 4245 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4246 int i; 4247 4248 for (i = 0; i < 3; i++) { 4249 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4250 memcpy(str + i * 16 + 0, &eax, 4); 4251 memcpy(str + i * 16 + 4, &ebx, 4); 4252 memcpy(str + i * 16 + 8, &ecx, 4); 4253 memcpy(str + i * 16 + 12, &edx, 4); 4254 } 4255 return 0; 4256 } 4257 4258 static Property max_x86_cpu_properties[] = { 4259 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4260 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4261 DEFINE_PROP_END_OF_LIST() 4262 }; 4263 4264 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4265 { 4266 DeviceClass *dc = DEVICE_CLASS(oc); 4267 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4268 4269 xcc->ordering = 9; 4270 4271 xcc->model_description = 4272 "Enables all features supported by the accelerator in the current host"; 4273 4274 device_class_set_props(dc, max_x86_cpu_properties); 4275 } 4276 4277 static void max_x86_cpu_initfn(Object *obj) 4278 { 4279 X86CPU *cpu = X86_CPU(obj); 4280 CPUX86State *env = &cpu->env; 4281 KVMState *s = kvm_state; 4282 4283 /* We can't fill the features array here because we don't know yet if 4284 * "migratable" is true or false. 4285 */ 4286 cpu->max_features = true; 4287 4288 if (accel_uses_host_cpuid()) { 4289 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4290 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4291 int family, model, stepping; 4292 4293 host_vendor_fms(vendor, &family, &model, &stepping); 4294 cpu_x86_fill_model_id(model_id); 4295 4296 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 4297 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 4298 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 4299 object_property_set_int(OBJECT(cpu), stepping, "stepping", 4300 &error_abort); 4301 object_property_set_str(OBJECT(cpu), model_id, "model-id", 4302 &error_abort); 4303 4304 if (kvm_enabled()) { 4305 env->cpuid_min_level = 4306 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4307 env->cpuid_min_xlevel = 4308 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4309 env->cpuid_min_xlevel2 = 4310 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4311 } else { 4312 env->cpuid_min_level = 4313 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4314 env->cpuid_min_xlevel = 4315 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4316 env->cpuid_min_xlevel2 = 4317 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4318 } 4319 4320 if (lmce_supported()) { 4321 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 4322 } 4323 } else { 4324 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 4325 "vendor", &error_abort); 4326 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 4327 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 4328 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 4329 object_property_set_str(OBJECT(cpu), 4330 "QEMU TCG CPU version " QEMU_HW_VERSION, 4331 "model-id", &error_abort); 4332 } 4333 4334 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 4335 } 4336 4337 static const TypeInfo max_x86_cpu_type_info = { 4338 .name = X86_CPU_TYPE_NAME("max"), 4339 .parent = TYPE_X86_CPU, 4340 .instance_init = max_x86_cpu_initfn, 4341 .class_init = max_x86_cpu_class_init, 4342 }; 4343 4344 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4345 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4346 { 
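    /*
     * Unlike "max" above, the "host" model requires an accelerator that can
     * expose the host's CPUID (host_cpuid_required below), which is why this
     * type is only registered when KVM or HVF support is compiled in; under
     * TCG, "-cpu max" is the closest equivalent.
     */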
4347 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4348 4349 xcc->host_cpuid_required = true; 4350 xcc->ordering = 8; 4351 4352 #if defined(CONFIG_KVM) 4353 xcc->model_description = 4354 "KVM processor with all supported host features "; 4355 #elif defined(CONFIG_HVF) 4356 xcc->model_description = 4357 "HVF processor with all supported host features "; 4358 #endif 4359 } 4360 4361 static const TypeInfo host_x86_cpu_type_info = { 4362 .name = X86_CPU_TYPE_NAME("host"), 4363 .parent = X86_CPU_TYPE_NAME("max"), 4364 .class_init = host_x86_cpu_class_init, 4365 }; 4366 4367 #endif 4368 4369 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4370 { 4371 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4372 4373 switch (f->type) { 4374 case CPUID_FEATURE_WORD: 4375 { 4376 const char *reg = get_register_name_32(f->cpuid.reg); 4377 assert(reg); 4378 return g_strdup_printf("CPUID.%02XH:%s", 4379 f->cpuid.eax, reg); 4380 } 4381 case MSR_FEATURE_WORD: 4382 return g_strdup_printf("MSR(%02XH)", 4383 f->msr.index); 4384 } 4385 4386 return NULL; 4387 } 4388 4389 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4390 { 4391 FeatureWord w; 4392 4393 for (w = 0; w < FEATURE_WORDS; w++) { 4394 if (cpu->filtered_features[w]) { 4395 return true; 4396 } 4397 } 4398 4399 return false; 4400 } 4401 4402 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4403 const char *verbose_prefix) 4404 { 4405 CPUX86State *env = &cpu->env; 4406 FeatureWordInfo *f = &feature_word_info[w]; 4407 int i; 4408 4409 if (!cpu->force_features) { 4410 env->features[w] &= ~mask; 4411 } 4412 cpu->filtered_features[w] |= mask; 4413 4414 if (!verbose_prefix) { 4415 return; 4416 } 4417 4418 for (i = 0; i < 64; ++i) { 4419 if ((1ULL << i) & mask) { 4420 g_autofree char *feat_word_str = feature_word_description(f, i); 4421 warn_report("%s: %s%s%s [bit %d]", 4422 verbose_prefix, 4423 feat_word_str, 4424 f->feat_names[i] ? "." : "", 4425 f->feat_names[i] ? f->feat_names[i] : "", i); 4426 } 4427 } 4428 } 4429 4430 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4431 const char *name, void *opaque, 4432 Error **errp) 4433 { 4434 X86CPU *cpu = X86_CPU(obj); 4435 CPUX86State *env = &cpu->env; 4436 int64_t value; 4437 4438 value = (env->cpuid_version >> 8) & 0xf; 4439 if (value == 0xf) { 4440 value += (env->cpuid_version >> 20) & 0xff; 4441 } 4442 visit_type_int(v, name, &value, errp); 4443 } 4444 4445 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4446 const char *name, void *opaque, 4447 Error **errp) 4448 { 4449 X86CPU *cpu = X86_CPU(obj); 4450 CPUX86State *env = &cpu->env; 4451 const int64_t min = 0; 4452 const int64_t max = 0xff + 0xf; 4453 Error *local_err = NULL; 4454 int64_t value; 4455 4456 visit_type_int(v, name, &value, &local_err); 4457 if (local_err) { 4458 error_propagate(errp, local_err); 4459 return; 4460 } 4461 if (value < min || value > max) { 4462 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4463 name ? 
name : "null", value, min, max); 4464 return; 4465 } 4466 4467 env->cpuid_version &= ~0xff00f00; 4468 if (value > 0x0f) { 4469 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4470 } else { 4471 env->cpuid_version |= value << 8; 4472 } 4473 } 4474 4475 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4476 const char *name, void *opaque, 4477 Error **errp) 4478 { 4479 X86CPU *cpu = X86_CPU(obj); 4480 CPUX86State *env = &cpu->env; 4481 int64_t value; 4482 4483 value = (env->cpuid_version >> 4) & 0xf; 4484 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4485 visit_type_int(v, name, &value, errp); 4486 } 4487 4488 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4489 const char *name, void *opaque, 4490 Error **errp) 4491 { 4492 X86CPU *cpu = X86_CPU(obj); 4493 CPUX86State *env = &cpu->env; 4494 const int64_t min = 0; 4495 const int64_t max = 0xff; 4496 Error *local_err = NULL; 4497 int64_t value; 4498 4499 visit_type_int(v, name, &value, &local_err); 4500 if (local_err) { 4501 error_propagate(errp, local_err); 4502 return; 4503 } 4504 if (value < min || value > max) { 4505 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4506 name ? name : "null", value, min, max); 4507 return; 4508 } 4509 4510 env->cpuid_version &= ~0xf00f0; 4511 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4512 } 4513 4514 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4515 const char *name, void *opaque, 4516 Error **errp) 4517 { 4518 X86CPU *cpu = X86_CPU(obj); 4519 CPUX86State *env = &cpu->env; 4520 int64_t value; 4521 4522 value = env->cpuid_version & 0xf; 4523 visit_type_int(v, name, &value, errp); 4524 } 4525 4526 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4527 const char *name, void *opaque, 4528 Error **errp) 4529 { 4530 X86CPU *cpu = X86_CPU(obj); 4531 CPUX86State *env = &cpu->env; 4532 const int64_t min = 0; 4533 const int64_t max = 0xf; 4534 Error *local_err = NULL; 4535 int64_t value; 4536 4537 visit_type_int(v, name, &value, &local_err); 4538 if (local_err) { 4539 error_propagate(errp, local_err); 4540 return; 4541 } 4542 if (value < min || value > max) { 4543 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4544 name ? 
name : "null", value, min, max); 4545 return; 4546 } 4547 4548 env->cpuid_version &= ~0xf; 4549 env->cpuid_version |= value & 0xf; 4550 } 4551 4552 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4553 { 4554 X86CPU *cpu = X86_CPU(obj); 4555 CPUX86State *env = &cpu->env; 4556 char *value; 4557 4558 value = g_malloc(CPUID_VENDOR_SZ + 1); 4559 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4560 env->cpuid_vendor3); 4561 return value; 4562 } 4563 4564 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4565 Error **errp) 4566 { 4567 X86CPU *cpu = X86_CPU(obj); 4568 CPUX86State *env = &cpu->env; 4569 int i; 4570 4571 if (strlen(value) != CPUID_VENDOR_SZ) { 4572 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4573 return; 4574 } 4575 4576 env->cpuid_vendor1 = 0; 4577 env->cpuid_vendor2 = 0; 4578 env->cpuid_vendor3 = 0; 4579 for (i = 0; i < 4; i++) { 4580 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4581 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4582 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4583 } 4584 } 4585 4586 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4587 { 4588 X86CPU *cpu = X86_CPU(obj); 4589 CPUX86State *env = &cpu->env; 4590 char *value; 4591 int i; 4592 4593 value = g_malloc(48 + 1); 4594 for (i = 0; i < 48; i++) { 4595 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4596 } 4597 value[48] = '\0'; 4598 return value; 4599 } 4600 4601 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4602 Error **errp) 4603 { 4604 X86CPU *cpu = X86_CPU(obj); 4605 CPUX86State *env = &cpu->env; 4606 int c, len, i; 4607 4608 if (model_id == NULL) { 4609 model_id = ""; 4610 } 4611 len = strlen(model_id); 4612 memset(env->cpuid_model, 0, 48); 4613 for (i = 0; i < 48; i++) { 4614 if (i >= len) { 4615 c = '\0'; 4616 } else { 4617 c = (uint8_t)model_id[i]; 4618 } 4619 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4620 } 4621 } 4622 4623 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4624 void *opaque, Error **errp) 4625 { 4626 X86CPU *cpu = X86_CPU(obj); 4627 int64_t value; 4628 4629 value = cpu->env.tsc_khz * 1000; 4630 visit_type_int(v, name, &value, errp); 4631 } 4632 4633 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4634 void *opaque, Error **errp) 4635 { 4636 X86CPU *cpu = X86_CPU(obj); 4637 const int64_t min = 0; 4638 const int64_t max = INT64_MAX; 4639 Error *local_err = NULL; 4640 int64_t value; 4641 4642 visit_type_int(v, name, &value, &local_err); 4643 if (local_err) { 4644 error_propagate(errp, local_err); 4645 return; 4646 } 4647 if (value < min || value > max) { 4648 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4649 name ? name : "null", value, min, max); 4650 return; 4651 } 4652 4653 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4654 } 4655 4656 /* Generic getter for "feature-words" and "filtered-features" properties */ 4657 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4658 const char *name, void *opaque, 4659 Error **errp) 4660 { 4661 uint64_t *array = (uint64_t *)opaque; 4662 FeatureWord w; 4663 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4664 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4665 X86CPUFeatureWordInfoList *list = NULL; 4666 4667 for (w = 0; w < FEATURE_WORDS; w++) { 4668 FeatureWordInfo *wi = &feature_word_info[w]; 4669 /* 4670 * We didn't have MSR features when "feature-words" was 4671 * introduced. 
Therefore, entries of other types are skipped here. 4672 */ 4673 if (wi->type != CPUID_FEATURE_WORD) { 4674 continue; 4675 } 4676 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4677 qwi->cpuid_input_eax = wi->cpuid.eax; 4678 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4679 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4680 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4681 qwi->features = array[w]; 4682 4683 /* List will be in reverse order, but order shouldn't matter */ 4684 list_entries[w].next = list; 4685 list_entries[w].value = &word_infos[w]; 4686 list = &list_entries[w]; 4687 } 4688 4689 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4690 } 4691 4692 /* Convert all '_' in a feature string option name to '-', to make feature 4693 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4694 */ 4695 static inline void feat2prop(char *s) 4696 { 4697 while ((s = strchr(s, '_'))) { 4698 *s = '-'; 4699 } 4700 } 4701 4702 /* Return the feature property name for a feature flag bit */ 4703 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4704 { 4705 const char *name; 4706 /* XSAVE components are automatically enabled by other features, 4707 * so return the original feature name instead 4708 */ 4709 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4710 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4711 4712 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4713 x86_ext_save_areas[comp].bits) { 4714 w = x86_ext_save_areas[comp].feature; 4715 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4716 } 4717 } 4718 4719 assert(bitnr < 64); 4720 assert(w < FEATURE_WORDS); 4721 name = feature_word_info[w].feat_names[bitnr]; 4722 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4723 return name; 4724 } 4725 4726 /* Compatibility hack to maintain the legacy +-feat semantics, 4727 * where +-feat overwrites any feature set by 4728 * "feat=on" or plain "feat", even if the latter is parsed after +-feat 4729 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4730 */ 4731 static GList *plus_features, *minus_features; 4732 4733 static gint compare_string(gconstpointer a, gconstpointer b) 4734 { 4735 return g_strcmp0(a, b); 4736 } 4737 4738 /* Parse "+feature,-feature,feature=foo" CPU feature string 4739 */ 4740 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4741 Error **errp) 4742 { 4743 char *featurestr; /* Single "key=value" string being parsed */ 4744 static bool cpu_globals_initialized; 4745 bool ambiguous = false; 4746 4747 if (cpu_globals_initialized) { 4748 return; 4749 } 4750 cpu_globals_initialized = true; 4751 4752 if (!features) { 4753 return; 4754 } 4755 4756 for (featurestr = strtok(features, ","); 4757 featurestr; 4758 featurestr = strtok(NULL, ",")) { 4759 const char *name; 4760 const char *val = NULL; 4761 char *eq = NULL; 4762 char num[32]; 4763 GlobalProperty *prop; 4764 4765 /* Compatibility syntax: */ 4766 if (featurestr[0] == '+') { 4767 plus_features = g_list_append(plus_features, 4768 g_strdup(featurestr + 1)); 4769 continue; 4770 } else if (featurestr[0] == '-') { 4771 minus_features = g_list_append(minus_features, 4772 g_strdup(featurestr + 1)); 4773 continue; 4774 } 4775 4776 eq = strchr(featurestr, '='); 4777 if (eq) { 4778 *eq++ = 0; 4779 val = eq; 4780 } else { 4781 val = "on"; 4782 } 4783 4784 feat2prop(featurestr); 4785 name = featurestr; 4786 4787 if (g_list_find_custom(plus_features, name, compare_string)) { 4788 warn_report("Ambiguous CPU model string. 
" 4789 "Don't mix both \"+%s\" and \"%s=%s\"", 4790 name, name, val); 4791 ambiguous = true; 4792 } 4793 if (g_list_find_custom(minus_features, name, compare_string)) { 4794 warn_report("Ambiguous CPU model string. " 4795 "Don't mix both \"-%s\" and \"%s=%s\"", 4796 name, name, val); 4797 ambiguous = true; 4798 } 4799 4800 /* Special case: */ 4801 if (!strcmp(name, "tsc-freq")) { 4802 int ret; 4803 uint64_t tsc_freq; 4804 4805 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4806 if (ret < 0 || tsc_freq > INT64_MAX) { 4807 error_setg(errp, "bad numerical value %s", val); 4808 return; 4809 } 4810 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4811 val = num; 4812 name = "tsc-frequency"; 4813 } 4814 4815 prop = g_new0(typeof(*prop), 1); 4816 prop->driver = typename; 4817 prop->property = g_strdup(name); 4818 prop->value = g_strdup(val); 4819 qdev_prop_register_global(prop); 4820 } 4821 4822 if (ambiguous) { 4823 warn_report("Compatibility of ambiguous CPU model " 4824 "strings won't be kept on future QEMU versions"); 4825 } 4826 } 4827 4828 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4829 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4830 4831 /* Build a list with the name of all features on a feature word array */ 4832 static void x86_cpu_list_feature_names(FeatureWordArray features, 4833 strList **feat_names) 4834 { 4835 FeatureWord w; 4836 strList **next = feat_names; 4837 4838 for (w = 0; w < FEATURE_WORDS; w++) { 4839 uint64_t filtered = features[w]; 4840 int i; 4841 for (i = 0; i < 64; i++) { 4842 if (filtered & (1ULL << i)) { 4843 strList *new = g_new0(strList, 1); 4844 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4845 *next = new; 4846 next = &new->next; 4847 } 4848 } 4849 } 4850 } 4851 4852 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4853 const char *name, void *opaque, 4854 Error **errp) 4855 { 4856 X86CPU *xc = X86_CPU(obj); 4857 strList *result = NULL; 4858 4859 x86_cpu_list_feature_names(xc->filtered_features, &result); 4860 visit_type_strList(v, "unavailable-features", &result, errp); 4861 } 4862 4863 /* Check for missing features that may prevent the CPU class from 4864 * running using the current machine and accelerator. 4865 */ 4866 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4867 strList **missing_feats) 4868 { 4869 X86CPU *xc; 4870 Error *err = NULL; 4871 strList **next = missing_feats; 4872 4873 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4874 strList *new = g_new0(strList, 1); 4875 new->value = g_strdup("kvm"); 4876 *missing_feats = new; 4877 return; 4878 } 4879 4880 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4881 4882 x86_cpu_expand_features(xc, &err); 4883 if (err) { 4884 /* Errors at x86_cpu_expand_features should never happen, 4885 * but in case it does, just report the model as not 4886 * runnable at all using the "type" property. 
4887 */ 4888 strList *new = g_new0(strList, 1); 4889 new->value = g_strdup("type"); 4890 *next = new; 4891 next = &new->next; 4892 } 4893 4894 x86_cpu_filter_features(xc, false); 4895 4896 x86_cpu_list_feature_names(xc->filtered_features, next); 4897 4898 object_unref(OBJECT(xc)); 4899 } 4900 4901 /* Print all cpuid feature names in featureset 4902 */ 4903 static void listflags(GList *features) 4904 { 4905 size_t len = 0; 4906 GList *tmp; 4907 4908 for (tmp = features; tmp; tmp = tmp->next) { 4909 const char *name = tmp->data; 4910 if ((len + strlen(name) + 1) >= 75) { 4911 qemu_printf("\n"); 4912 len = 0; 4913 } 4914 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4915 len += strlen(name) + 1; 4916 } 4917 qemu_printf("\n"); 4918 } 4919 4920 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4921 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4922 { 4923 ObjectClass *class_a = (ObjectClass *)a; 4924 ObjectClass *class_b = (ObjectClass *)b; 4925 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4926 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4927 int ret; 4928 4929 if (cc_a->ordering != cc_b->ordering) { 4930 ret = cc_a->ordering - cc_b->ordering; 4931 } else { 4932 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4933 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4934 ret = strcmp(name_a, name_b); 4935 } 4936 return ret; 4937 } 4938 4939 static GSList *get_sorted_cpu_model_list(void) 4940 { 4941 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4942 list = g_slist_sort(list, x86_cpu_list_compare); 4943 return list; 4944 } 4945 4946 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4947 { 4948 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4949 char *r = object_property_get_str(obj, "model-id", &error_abort); 4950 object_unref(obj); 4951 return r; 4952 } 4953 4954 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4955 { 4956 X86CPUVersion version; 4957 4958 if (!cc->model || !cc->model->is_alias) { 4959 return NULL; 4960 } 4961 version = x86_cpu_model_resolve_version(cc->model); 4962 if (version <= 0) { 4963 return NULL; 4964 } 4965 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4966 } 4967 4968 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4969 { 4970 ObjectClass *oc = data; 4971 X86CPUClass *cc = X86_CPU_CLASS(oc); 4972 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4973 g_autofree char *desc = g_strdup(cc->model_description); 4974 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4975 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4976 4977 if (!desc && alias_of) { 4978 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4979 desc = g_strdup("(alias configured by machine type)"); 4980 } else { 4981 desc = g_strdup_printf("(alias of %s)", alias_of); 4982 } 4983 } 4984 if (!desc && cc->model && cc->model->note) { 4985 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4986 } 4987 if (!desc) { 4988 desc = g_strdup_printf("%s", model_id); 4989 } 4990 4991 qemu_printf("x86 %-20s %-58s\n", name, desc); 4992 } 4993 4994 /* list available CPU models and flags */ 4995 void x86_cpu_list(void) 4996 { 4997 int i, j; 4998 GSList *list; 4999 GList *names = NULL; 5000 5001 qemu_printf("Available CPUs:\n"); 5002 list = get_sorted_cpu_model_list(); 5003 g_slist_foreach(list, x86_cpu_list_entry, NULL); 5004 g_slist_free(list); 5005 5006 names = NULL; 5007 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 5008 
FeatureWordInfo *fw = &feature_word_info[i]; 5009 for (j = 0; j < 64; j++) { 5010 if (fw->feat_names[j]) { 5011 names = g_list_append(names, (gpointer)fw->feat_names[j]); 5012 } 5013 } 5014 } 5015 5016 names = g_list_sort(names, (GCompareFunc)strcmp); 5017 5018 qemu_printf("\nRecognized CPUID flags:\n"); 5019 listflags(names); 5020 qemu_printf("\n"); 5021 g_list_free(names); 5022 } 5023 5024 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 5025 { 5026 ObjectClass *oc = data; 5027 X86CPUClass *cc = X86_CPU_CLASS(oc); 5028 CpuDefinitionInfoList **cpu_list = user_data; 5029 CpuDefinitionInfoList *entry; 5030 CpuDefinitionInfo *info; 5031 5032 info = g_malloc0(sizeof(*info)); 5033 info->name = x86_cpu_class_get_model_name(cc); 5034 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5035 info->has_unavailable_features = true; 5036 info->q_typename = g_strdup(object_class_get_name(oc)); 5037 info->migration_safe = cc->migration_safe; 5038 info->has_migration_safe = true; 5039 info->q_static = cc->static_model; 5040 /* 5041 * Old machine types won't report aliases, so that alias translation 5042 * doesn't break compatibility with previous QEMU versions. 5043 */ 5044 if (default_cpu_version != CPU_VERSION_LEGACY) { 5045 info->alias_of = x86_cpu_class_get_alias_of(cc); 5046 info->has_alias_of = !!info->alias_of; 5047 } 5048 5049 entry = g_malloc0(sizeof(*entry)); 5050 entry->value = info; 5051 entry->next = *cpu_list; 5052 *cpu_list = entry; 5053 } 5054 5055 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5056 { 5057 CpuDefinitionInfoList *cpu_list = NULL; 5058 GSList *list = get_sorted_cpu_model_list(); 5059 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5060 g_slist_free(list); 5061 return cpu_list; 5062 } 5063 5064 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5065 bool migratable_only) 5066 { 5067 FeatureWordInfo *wi = &feature_word_info[w]; 5068 uint64_t r = 0; 5069 5070 if (kvm_enabled()) { 5071 switch (wi->type) { 5072 case CPUID_FEATURE_WORD: 5073 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5074 wi->cpuid.ecx, 5075 wi->cpuid.reg); 5076 break; 5077 case MSR_FEATURE_WORD: 5078 r = kvm_arch_get_supported_msr_feature(kvm_state, 5079 wi->msr.index); 5080 break; 5081 } 5082 } else if (hvf_enabled()) { 5083 if (wi->type != CPUID_FEATURE_WORD) { 5084 return 0; 5085 } 5086 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5087 wi->cpuid.ecx, 5088 wi->cpuid.reg); 5089 } else if (tcg_enabled()) { 5090 r = wi->tcg_features; 5091 } else { 5092 return ~0; 5093 } 5094 if (migratable_only) { 5095 r &= x86_cpu_get_migratable_flags(w); 5096 } 5097 return r; 5098 } 5099 5100 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5101 { 5102 PropValue *pv; 5103 for (pv = props; pv->prop; pv++) { 5104 if (!pv->value) { 5105 continue; 5106 } 5107 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 5108 &error_abort); 5109 } 5110 } 5111 5112 /* Apply properties for the CPU model version specified in model */ 5113 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5114 { 5115 const X86CPUVersionDefinition *vdef; 5116 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5117 5118 if (version == CPU_VERSION_LEGACY) { 5119 return; 5120 } 5121 5122 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5123 PropValue *p; 5124 5125 for (p = vdef->props; p && p->prop; p++) { 5126 object_property_parse(OBJECT(cpu), p->value, p->prop, 5127 &error_abort); 5128 
} 5129 5130 if (vdef->version == version) { 5131 break; 5132 } 5133 } 5134 5135 /* 5136 * If we reached the end of the list, version number was invalid 5137 */ 5138 assert(vdef->version == version); 5139 } 5140 5141 /* Load data from X86CPUDefinition into a X86CPU object 5142 */ 5143 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp) 5144 { 5145 X86CPUDefinition *def = model->cpudef; 5146 CPUX86State *env = &cpu->env; 5147 const char *vendor; 5148 char host_vendor[CPUID_VENDOR_SZ + 1]; 5149 FeatureWord w; 5150 5151 /*NOTE: any property set by this function should be returned by 5152 * x86_cpu_static_props(), so static expansion of 5153 * query-cpu-model-expansion is always complete. 5154 */ 5155 5156 /* CPU models only set _minimum_ values for level/xlevel: */ 5157 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp); 5158 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp); 5159 5160 object_property_set_int(OBJECT(cpu), def->family, "family", errp); 5161 object_property_set_int(OBJECT(cpu), def->model, "model", errp); 5162 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); 5163 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); 5164 for (w = 0; w < FEATURE_WORDS; w++) { 5165 env->features[w] = def->features[w]; 5166 } 5167 5168 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5169 cpu->legacy_cache = !def->cache_info; 5170 5171 /* Special cases not set in the X86CPUDefinition structs: */ 5172 /* TODO: in-kernel irqchip for hvf */ 5173 if (kvm_enabled()) { 5174 if (!kvm_irqchip_in_kernel()) { 5175 x86_cpu_change_kvm_default("x2apic", "off"); 5176 } 5177 5178 x86_cpu_apply_props(cpu, kvm_default_props); 5179 } else if (tcg_enabled()) { 5180 x86_cpu_apply_props(cpu, tcg_default_props); 5181 } 5182 5183 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5184 5185 /* sysenter isn't supported in compatibility mode on AMD, 5186 * syscall isn't supported in compatibility mode on Intel. 5187 * Normally we advertise the actual CPU vendor, but you can 5188 * override this using the 'vendor' property if you want to use 5189 * KVM's sysenter/syscall emulation in compatibility mode and 5190 * when doing cross vendor migration 5191 */ 5192 vendor = def->vendor; 5193 if (accel_uses_host_cpuid()) { 5194 uint32_t ebx = 0, ecx = 0, edx = 0; 5195 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5196 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5197 vendor = host_vendor; 5198 } 5199 5200 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp); 5201 5202 x86_cpu_apply_version_props(cpu, model); 5203 } 5204 5205 #ifndef CONFIG_USER_ONLY 5206 /* Return a QDict containing keys for all properties that can be included 5207 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5208 * must be included in the dictionary. 
5209 */ 5210 static QDict *x86_cpu_static_props(void) 5211 { 5212 FeatureWord w; 5213 int i; 5214 static const char *props[] = { 5215 "min-level", 5216 "min-xlevel", 5217 "family", 5218 "model", 5219 "stepping", 5220 "model-id", 5221 "vendor", 5222 "lmce", 5223 NULL, 5224 }; 5225 static QDict *d; 5226 5227 if (d) { 5228 return d; 5229 } 5230 5231 d = qdict_new(); 5232 for (i = 0; props[i]; i++) { 5233 qdict_put_null(d, props[i]); 5234 } 5235 5236 for (w = 0; w < FEATURE_WORDS; w++) { 5237 FeatureWordInfo *fi = &feature_word_info[w]; 5238 int bit; 5239 for (bit = 0; bit < 64; bit++) { 5240 if (!fi->feat_names[bit]) { 5241 continue; 5242 } 5243 qdict_put_null(d, fi->feat_names[bit]); 5244 } 5245 } 5246 5247 return d; 5248 } 5249 5250 /* Add an entry to @props dict, with the value for property. */ 5251 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5252 { 5253 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5254 &error_abort); 5255 5256 qdict_put_obj(props, prop, value); 5257 } 5258 5259 /* Convert CPU model data from X86CPU object to a property dictionary 5260 * that can recreate exactly the same CPU model. 5261 */ 5262 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5263 { 5264 QDict *sprops = x86_cpu_static_props(); 5265 const QDictEntry *e; 5266 5267 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5268 const char *prop = qdict_entry_key(e); 5269 x86_cpu_expand_prop(cpu, props, prop); 5270 } 5271 } 5272 5273 /* Convert CPU model data from X86CPU object to a property dictionary 5274 * that can recreate exactly the same CPU model, including every 5275 * writeable QOM property. 5276 */ 5277 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5278 { 5279 ObjectPropertyIterator iter; 5280 ObjectProperty *prop; 5281 5282 object_property_iter_init(&iter, OBJECT(cpu)); 5283 while ((prop = object_property_iter_next(&iter))) { 5284 /* skip read-only or write-only properties */ 5285 if (!prop->get || !prop->set) { 5286 continue; 5287 } 5288 5289 /* "hotplugged" is the only property that is configurable 5290 * on the command-line but will be set differently on CPUs 5291 * created using "-cpu ... -smp ..." and by CPUs created 5292 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5293 */ 5294 if (!strcmp(prop->name, "hotplugged")) { 5295 continue; 5296 } 5297 x86_cpu_expand_prop(cpu, props, prop->name); 5298 } 5299 } 5300 5301 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5302 { 5303 const QDictEntry *prop; 5304 Error *err = NULL; 5305 5306 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5307 object_property_set_qobject(obj, qdict_entry_value(prop), 5308 qdict_entry_key(prop), &err); 5309 if (err) { 5310 break; 5311 } 5312 } 5313 5314 error_propagate(errp, err); 5315 } 5316 5317 /* Create X86CPU object according to model+props specification */ 5318 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5319 { 5320 X86CPU *xc = NULL; 5321 X86CPUClass *xcc; 5322 Error *err = NULL; 5323 5324 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5325 if (xcc == NULL) { 5326 error_setg(&err, "CPU model '%s' not found", model); 5327 goto out; 5328 } 5329 5330 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5331 if (props) { 5332 object_apply_props(OBJECT(xc), props, &err); 5333 if (err) { 5334 goto out; 5335 } 5336 } 5337 5338 x86_cpu_expand_features(xc, &err); 5339 if (err) { 5340 goto out; 5341 } 5342 5343 out: 5344 if (err) { 5345 error_propagate(errp, err); 5346 object_unref(OBJECT(xc)); 5347 xc = NULL; 5348 } 5349 return xc; 5350 } 5351 5352 CpuModelExpansionInfo * 5353 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5354 CpuModelInfo *model, 5355 Error **errp) 5356 { 5357 X86CPU *xc = NULL; 5358 Error *err = NULL; 5359 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5360 QDict *props = NULL; 5361 const char *base_name; 5362 5363 xc = x86_cpu_from_model(model->name, 5364 model->has_props ? 5365 qobject_to(QDict, model->props) : 5366 NULL, &err); 5367 if (err) { 5368 goto out; 5369 } 5370 5371 props = qdict_new(); 5372 ret->model = g_new0(CpuModelInfo, 1); 5373 ret->model->props = QOBJECT(props); 5374 ret->model->has_props = true; 5375 5376 switch (type) { 5377 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5378 /* Static expansion will be based on "base" only */ 5379 base_name = "base"; 5380 x86_cpu_to_dict(xc, props); 5381 break; 5382 case CPU_MODEL_EXPANSION_TYPE_FULL: 5383 /* As we don't return every single property, full expansion needs 5384 * to keep the original model name+props, and add extra 5385 * properties on top of that. 
5386 */ 5387 base_name = model->name; 5388 x86_cpu_to_dict_full(xc, props); 5389 break; 5390 default: 5391 error_setg(&err, "Unsupported expansion type"); 5392 goto out; 5393 } 5394 5395 x86_cpu_to_dict(xc, props); 5396 5397 ret->model->name = g_strdup(base_name); 5398 5399 out: 5400 object_unref(OBJECT(xc)); 5401 if (err) { 5402 error_propagate(errp, err); 5403 qapi_free_CpuModelExpansionInfo(ret); 5404 ret = NULL; 5405 } 5406 return ret; 5407 } 5408 #endif /* !CONFIG_USER_ONLY */ 5409 5410 static gchar *x86_gdb_arch_name(CPUState *cs) 5411 { 5412 #ifdef TARGET_X86_64 5413 return g_strdup("i386:x86-64"); 5414 #else 5415 return g_strdup("i386"); 5416 #endif 5417 } 5418 5419 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5420 { 5421 X86CPUModel *model = data; 5422 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5423 5424 xcc->model = model; 5425 xcc->migration_safe = true; 5426 } 5427 5428 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5429 { 5430 g_autofree char *typename = x86_cpu_type_name(name); 5431 TypeInfo ti = { 5432 .name = typename, 5433 .parent = TYPE_X86_CPU, 5434 .class_init = x86_cpu_cpudef_class_init, 5435 .class_data = model, 5436 }; 5437 5438 type_register(&ti); 5439 } 5440 5441 static void x86_register_cpudef_types(X86CPUDefinition *def) 5442 { 5443 X86CPUModel *m; 5444 const X86CPUVersionDefinition *vdef; 5445 5446 /* AMD aliases are handled at runtime based on CPUID vendor, so 5447 * they shouldn't be set on the CPU model table. 5448 */ 5449 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5450 /* catch mistakes instead of silently truncating model_id when too long */ 5451 assert(def->model_id && strlen(def->model_id) <= 48); 5452 5453 /* Unversioned model: */ 5454 m = g_new0(X86CPUModel, 1); 5455 m->cpudef = def; 5456 m->version = CPU_VERSION_AUTO; 5457 m->is_alias = true; 5458 x86_register_cpu_model_type(def->name, m); 5459 5460 /* Versioned models: */ 5461 5462 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5463 X86CPUModel *m = g_new0(X86CPUModel, 1); 5464 g_autofree char *name = 5465 x86_cpu_versioned_model_name(def, vdef->version); 5466 m->cpudef = def; 5467 m->version = vdef->version; 5468 m->note = vdef->note; 5469 x86_register_cpu_model_type(name, m); 5470 5471 if (vdef->alias) { 5472 X86CPUModel *am = g_new0(X86CPUModel, 1); 5473 am->cpudef = def; 5474 am->version = vdef->version; 5475 am->is_alias = true; 5476 x86_register_cpu_model_type(vdef->alias, am); 5477 } 5478 } 5479 5480 } 5481 5482 #if !defined(CONFIG_USER_ONLY) 5483 5484 void cpu_clear_apic_feature(CPUX86State *env) 5485 { 5486 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5487 } 5488 5489 #endif /* !CONFIG_USER_ONLY */ 5490 5491 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5492 uint32_t *eax, uint32_t *ebx, 5493 uint32_t *ecx, uint32_t *edx) 5494 { 5495 X86CPU *cpu = env_archcpu(env); 5496 CPUState *cs = env_cpu(env); 5497 uint32_t die_offset; 5498 uint32_t limit; 5499 uint32_t signature[3]; 5500 5501 /* Calculate & apply limits for different index ranges */ 5502 if (index >= 0xC0000000) { 5503 limit = env->cpuid_xlevel2; 5504 } else if (index >= 0x80000000) { 5505 limit = env->cpuid_xlevel; 5506 } else if (index >= 0x40000000) { 5507 limit = 0x40000001; 5508 } else { 5509 limit = env->cpuid_level; 5510 } 5511 5512 if (index > limit) { 5513 /* Intel documentation states that invalid EAX input will 5514 * return the same information as EAX=cpuid_level 5515 * (Intel SDM Vol. 
2A - Instruction Set Reference - CPUID) 5516 */ 5517 index = env->cpuid_level; 5518 } 5519 5520 switch(index) { 5521 case 0: 5522 *eax = env->cpuid_level; 5523 *ebx = env->cpuid_vendor1; 5524 *edx = env->cpuid_vendor2; 5525 *ecx = env->cpuid_vendor3; 5526 break; 5527 case 1: 5528 *eax = env->cpuid_version; 5529 *ebx = (cpu->apic_id << 24) | 5530 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5531 *ecx = env->features[FEAT_1_ECX]; 5532 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5533 *ecx |= CPUID_EXT_OSXSAVE; 5534 } 5535 *edx = env->features[FEAT_1_EDX]; 5536 if (cs->nr_cores * cs->nr_threads > 1) { 5537 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5538 *edx |= CPUID_HT; 5539 } 5540 break; 5541 case 2: 5542 /* cache info: needed for Pentium Pro compatibility */ 5543 if (cpu->cache_info_passthrough) { 5544 host_cpuid(index, 0, eax, ebx, ecx, edx); 5545 break; 5546 } 5547 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5548 *ebx = 0; 5549 if (!cpu->enable_l3_cache) { 5550 *ecx = 0; 5551 } else { 5552 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5553 } 5554 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5555 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5556 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5557 break; 5558 case 4: 5559 /* cache info: needed for Core compatibility */ 5560 if (cpu->cache_info_passthrough) { 5561 host_cpuid(index, count, eax, ebx, ecx, edx); 5562 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ 5563 *eax &= ~0xFC000000; 5564 if ((*eax & 31) && cs->nr_cores > 1) { 5565 *eax |= (cs->nr_cores - 1) << 26; 5566 } 5567 } else { 5568 *eax = 0; 5569 switch (count) { 5570 case 0: /* L1 dcache info */ 5571 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5572 1, cs->nr_cores, 5573 eax, ebx, ecx, edx); 5574 break; 5575 case 1: /* L1 icache info */ 5576 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5577 1, cs->nr_cores, 5578 eax, ebx, ecx, edx); 5579 break; 5580 case 2: /* L2 cache info */ 5581 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5582 cs->nr_threads, cs->nr_cores, 5583 eax, ebx, ecx, edx); 5584 break; 5585 case 3: /* L3 cache info */ 5586 die_offset = apicid_die_offset(env->nr_dies, 5587 cs->nr_cores, cs->nr_threads); 5588 if (cpu->enable_l3_cache) { 5589 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5590 (1 << die_offset), cs->nr_cores, 5591 eax, ebx, ecx, edx); 5592 break; 5593 } 5594 /* fall through */ 5595 default: /* end of info */ 5596 *eax = *ebx = *ecx = *edx = 0; 5597 break; 5598 } 5599 } 5600 break; 5601 case 5: 5602 /* MONITOR/MWAIT Leaf */ 5603 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5604 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5605 *ecx = cpu->mwait.ecx; /* flags */ 5606 *edx = cpu->mwait.edx; /* mwait substates */ 5607 break; 5608 case 6: 5609 /* Thermal and Power Leaf */ 5610 *eax = env->features[FEAT_6_EAX]; 5611 *ebx = 0; 5612 *ecx = 0; 5613 *edx = 0; 5614 break; 5615 case 7: 5616 /* Structured Extended Feature Flags Enumeration Leaf */ 5617 if (count == 0) { 5618 /* Maximum ECX value for sub-leaves */ 5619 *eax = env->cpuid_level_func7; 5620 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5621 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5622 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5623 *ecx |= CPUID_7_0_ECX_OSPKE; 5624 } 5625 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5626 } 
else if (count == 1) { 5627 *eax = env->features[FEAT_7_1_EAX]; 5628 *ebx = 0; 5629 *ecx = 0; 5630 *edx = 0; 5631 } else { 5632 *eax = 0; 5633 *ebx = 0; 5634 *ecx = 0; 5635 *edx = 0; 5636 } 5637 break; 5638 case 9: 5639 /* Direct Cache Access Information Leaf */ 5640 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5641 *ebx = 0; 5642 *ecx = 0; 5643 *edx = 0; 5644 break; 5645 case 0xA: 5646 /* Architectural Performance Monitoring Leaf */ 5647 if (kvm_enabled() && cpu->enable_pmu) { 5648 KVMState *s = cs->kvm_state; 5649 5650 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5651 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5652 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5653 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5654 } else if (hvf_enabled() && cpu->enable_pmu) { 5655 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5656 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5657 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5658 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5659 } else { 5660 *eax = 0; 5661 *ebx = 0; 5662 *ecx = 0; 5663 *edx = 0; 5664 } 5665 break; 5666 case 0xB: 5667 /* Extended Topology Enumeration Leaf */ 5668 if (!cpu->enable_cpuid_0xb) { 5669 *eax = *ebx = *ecx = *edx = 0; 5670 break; 5671 } 5672 5673 *ecx = count & 0xff; 5674 *edx = cpu->apic_id; 5675 5676 switch (count) { 5677 case 0: 5678 *eax = apicid_core_offset(env->nr_dies, 5679 cs->nr_cores, cs->nr_threads); 5680 *ebx = cs->nr_threads; 5681 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5682 break; 5683 case 1: 5684 *eax = apicid_pkg_offset(env->nr_dies, 5685 cs->nr_cores, cs->nr_threads); 5686 *ebx = cs->nr_cores * cs->nr_threads; 5687 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5688 break; 5689 default: 5690 *eax = 0; 5691 *ebx = 0; 5692 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5693 } 5694 5695 assert(!(*eax & ~0x1f)); 5696 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5697 break; 5698 case 0x1F: 5699 /* V2 Extended Topology Enumeration Leaf */ 5700 if (env->nr_dies < 2) { 5701 *eax = *ebx = *ecx = *edx = 0; 5702 break; 5703 } 5704 5705 *ecx = count & 0xff; 5706 *edx = cpu->apic_id; 5707 switch (count) { 5708 case 0: 5709 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores, 5710 cs->nr_threads); 5711 *ebx = cs->nr_threads; 5712 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5713 break; 5714 case 1: 5715 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores, 5716 cs->nr_threads); 5717 *ebx = cs->nr_cores * cs->nr_threads; 5718 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5719 break; 5720 case 2: 5721 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores, 5722 cs->nr_threads); 5723 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5724 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5725 break; 5726 default: 5727 *eax = 0; 5728 *ebx = 0; 5729 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5730 } 5731 assert(!(*eax & ~0x1f)); 5732 *ebx &= 0xffff; /* The count doesn't need to be reliable. 
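 * (Per the Intel SDM, software should not rely on the EBX count from this leaf to enumerate topology; the EAX bit-shift widths and the x2APIC ID in EDX are what software should use.)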
*/ 5733 break; 5734 case 0xD: { 5735 /* Processor Extended State */ 5736 *eax = 0; 5737 *ebx = 0; 5738 *ecx = 0; 5739 *edx = 0; 5740 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5741 break; 5742 } 5743 5744 if (count == 0) { 5745 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5746 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5747 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5748 /* 5749 * The initial value of xcr0 and ebx is 0. On a host without KVM 5750 * commit 412a3c41 (e.g., CentOS 6), ebx always stays 0 even 5751 * though the guest updates xcr0, which crashes some legacy guests 5752 * (e.g., CentOS 6). So set ebx == ecx to work around it. 5753 */ 5754 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5755 } else if (count == 1) { 5756 *eax = env->features[FEAT_XSAVE]; 5757 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5758 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5759 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5760 *eax = esa->size; 5761 *ebx = esa->offset; 5762 } 5763 } 5764 break; 5765 } 5766 case 0x14: { 5767 /* Intel Processor Trace Enumeration */ 5768 *eax = 0; 5769 *ebx = 0; 5770 *ecx = 0; 5771 *edx = 0; 5772 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5773 !kvm_enabled()) { 5774 break; 5775 } 5776 5777 if (count == 0) { 5778 *eax = INTEL_PT_MAX_SUBLEAF; 5779 *ebx = INTEL_PT_MINIMAL_EBX; 5780 *ecx = INTEL_PT_MINIMAL_ECX; 5781 } else if (count == 1) { 5782 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5783 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5784 } 5785 break; 5786 } 5787 case 0x40000000: 5788 /* 5789 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5790 * set here, but we restrict this to TCG nonetheless. 5791 */ 5792 if (tcg_enabled() && cpu->expose_tcg) { 5793 memcpy(signature, "TCGTCGTCGTCG", 12); 5794 *eax = 0x40000001; 5795 *ebx = signature[0]; 5796 *ecx = signature[1]; 5797 *edx = signature[2]; 5798 } else { 5799 *eax = 0; 5800 *ebx = 0; 5801 *ecx = 0; 5802 *edx = 0; 5803 } 5804 break; 5805 case 0x40000001: 5806 *eax = 0; 5807 *ebx = 0; 5808 *ecx = 0; 5809 *edx = 0; 5810 break; 5811 case 0x80000000: 5812 *eax = env->cpuid_xlevel; 5813 *ebx = env->cpuid_vendor1; 5814 *edx = env->cpuid_vendor2; 5815 *ecx = env->cpuid_vendor3; 5816 break; 5817 case 0x80000001: 5818 *eax = env->cpuid_version; 5819 *ebx = 0; 5820 *ecx = env->features[FEAT_8000_0001_ECX]; 5821 *edx = env->features[FEAT_8000_0001_EDX]; 5822 5823 /* The Linux kernel checks for the CMPLegacy bit and 5824 * discards multiple thread information if it is set. 5825 * So don't set it here for Intel to make Linux guests happy. 
5826 */ 5827 if (cs->nr_cores * cs->nr_threads > 1) { 5828 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5829 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5830 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5831 *ecx |= 1 << 1; /* CmpLegacy bit */ 5832 } 5833 } 5834 break; 5835 case 0x80000002: 5836 case 0x80000003: 5837 case 0x80000004: 5838 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5839 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5840 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5841 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5842 break; 5843 case 0x80000005: 5844 /* cache info (L1 cache) */ 5845 if (cpu->cache_info_passthrough) { 5846 host_cpuid(index, 0, eax, ebx, ecx, edx); 5847 break; 5848 } 5849 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ 5850 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5851 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ 5852 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5853 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5854 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5855 break; 5856 case 0x80000006: 5857 /* cache info (L2 cache) */ 5858 if (cpu->cache_info_passthrough) { 5859 host_cpuid(index, 0, eax, ebx, ecx, edx); 5860 break; 5861 } 5862 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ 5863 (L2_DTLB_2M_ENTRIES << 16) | \ 5864 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ 5865 (L2_ITLB_2M_ENTRIES); 5866 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ 5867 (L2_DTLB_4K_ENTRIES << 16) | \ 5868 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ 5869 (L2_ITLB_4K_ENTRIES); 5870 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5871 cpu->enable_l3_cache ? 5872 env->cache_info_amd.l3_cache : NULL, 5873 ecx, edx); 5874 break; 5875 case 0x80000007: 5876 *eax = 0; 5877 *ebx = 0; 5878 *ecx = 0; 5879 *edx = env->features[FEAT_8000_0007_EDX]; 5880 break; 5881 case 0x80000008: 5882 /* virtual & phys address size in low 2 bytes. 
*/ 5883 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5884 /* 64 bit processor */ 5885 *eax = cpu->phys_bits; /* configurable physical bits */ 5886 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5887 *eax |= 0x00003900; /* 57 bits virtual */ 5888 } else { 5889 *eax |= 0x00003000; /* 48 bits virtual */ 5890 } 5891 } else { 5892 *eax = cpu->phys_bits; 5893 } 5894 *ebx = env->features[FEAT_8000_0008_EBX]; 5895 *ecx = 0; 5896 *edx = 0; 5897 if (cs->nr_cores * cs->nr_threads > 1) { 5898 *ecx |= (cs->nr_cores * cs->nr_threads) - 1; 5899 } 5900 break; 5901 case 0x8000000A: 5902 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5903 *eax = 0x00000001; /* SVM Revision */ 5904 *ebx = 0x00000010; /* nr of ASIDs */ 5905 *ecx = 0; 5906 *edx = env->features[FEAT_SVM]; /* optional features */ 5907 } else { 5908 *eax = 0; 5909 *ebx = 0; 5910 *ecx = 0; 5911 *edx = 0; 5912 } 5913 break; 5914 case 0x8000001D: 5915 *eax = 0; 5916 if (cpu->cache_info_passthrough) { 5917 host_cpuid(index, count, eax, ebx, ecx, edx); 5918 break; 5919 } 5920 switch (count) { 5921 case 0: /* L1 dcache info */ 5922 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs, 5923 eax, ebx, ecx, edx); 5924 break; 5925 case 1: /* L1 icache info */ 5926 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs, 5927 eax, ebx, ecx, edx); 5928 break; 5929 case 2: /* L2 cache info */ 5930 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs, 5931 eax, ebx, ecx, edx); 5932 break; 5933 case 3: /* L3 cache info */ 5934 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs, 5935 eax, ebx, ecx, edx); 5936 break; 5937 default: /* end of info */ 5938 *eax = *ebx = *ecx = *edx = 0; 5939 break; 5940 } 5941 break; 5942 case 0x8000001E: 5943 assert(cpu->core_id <= 255); 5944 encode_topo_cpuid8000001e(cs, cpu, 5945 eax, ebx, ecx, edx); 5946 break; 5947 case 0xC0000000: 5948 *eax = env->cpuid_xlevel2; 5949 *ebx = 0; 5950 *ecx = 0; 5951 *edx = 0; 5952 break; 5953 case 0xC0000001: 5954 /* Support for VIA CPU's CPUID instruction */ 5955 *eax = env->cpuid_version; 5956 *ebx = 0; 5957 *ecx = 0; 5958 *edx = env->features[FEAT_C000_0001_EDX]; 5959 break; 5960 case 0xC0000002: 5961 case 0xC0000003: 5962 case 0xC0000004: 5963 /* Reserved for the future, and now filled with zero */ 5964 *eax = 0; 5965 *ebx = 0; 5966 *ecx = 0; 5967 *edx = 0; 5968 break; 5969 case 0x8000001F: 5970 *eax = sev_enabled() ? 
0x2 : 0; 5971 *ebx = sev_get_cbit_position(); 5972 *ebx |= sev_get_reduced_phys_bits() << 6; 5973 *ecx = 0; 5974 *edx = 0; 5975 break; 5976 default: 5977 /* reserved values: zero */ 5978 *eax = 0; 5979 *ebx = 0; 5980 *ecx = 0; 5981 *edx = 0; 5982 break; 5983 } 5984 } 5985 5986 static void x86_cpu_reset(DeviceState *dev) 5987 { 5988 CPUState *s = CPU(dev); 5989 X86CPU *cpu = X86_CPU(s); 5990 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 5991 CPUX86State *env = &cpu->env; 5992 target_ulong cr4; 5993 uint64_t xcr0; 5994 int i; 5995 5996 xcc->parent_reset(dev); 5997 5998 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 5999 6000 env->old_exception = -1; 6001 6002 /* init to reset state */ 6003 6004 env->hflags2 |= HF2_GIF_MASK; 6005 6006 cpu_x86_update_cr0(env, 0x60000010); 6007 env->a20_mask = ~0x0; 6008 env->smbase = 0x30000; 6009 env->msr_smi_count = 0; 6010 6011 env->idt.limit = 0xffff; 6012 env->gdt.limit = 0xffff; 6013 env->ldt.limit = 0xffff; 6014 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6015 env->tr.limit = 0xffff; 6016 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6017 6018 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6019 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6020 DESC_R_MASK | DESC_A_MASK); 6021 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6022 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6023 DESC_A_MASK); 6024 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6025 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6026 DESC_A_MASK); 6027 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6028 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6029 DESC_A_MASK); 6030 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6031 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6032 DESC_A_MASK); 6033 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6034 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6035 DESC_A_MASK); 6036 6037 env->eip = 0xfff0; 6038 env->regs[R_EDX] = env->cpuid_version; 6039 6040 env->eflags = 0x2; 6041 6042 /* FPU init */ 6043 for (i = 0; i < 8; i++) { 6044 env->fptags[i] = 1; 6045 } 6046 cpu_set_fpuc(env, 0x37f); 6047 6048 env->mxcsr = 0x1f80; 6049 /* All units are in INIT state. */ 6050 env->xstate_bv = 0; 6051 6052 env->pat = 0x0007040600070406ULL; 6053 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6054 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6055 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6056 } 6057 6058 memset(env->dr, 0, sizeof(env->dr)); 6059 env->dr[6] = DR6_FIXED_1; 6060 env->dr[7] = DR7_FIXED_1; 6061 cpu_breakpoint_remove_all(s, BP_CPU); 6062 cpu_watchpoint_remove_all(s, BP_CPU); 6063 6064 cr4 = 0; 6065 xcr0 = XSTATE_FP_MASK; 6066 6067 #ifdef CONFIG_USER_ONLY 6068 /* Enable all the features for user-mode. */ 6069 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6070 xcr0 |= XSTATE_SSE_MASK; 6071 } 6072 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6073 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6074 if (env->features[esa->feature] & esa->bits) { 6075 xcr0 |= 1ull << i; 6076 } 6077 } 6078 6079 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6080 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6081 } 6082 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6083 cr4 |= CR4_FSGSBASE_MASK; 6084 } 6085 #endif 6086 6087 env->xcr0 = xcr0; 6088 cpu_x86_update_cr4(env, cr4); 6089 6090 /* 6091 * SDM 11.11.5 requires: 6092 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6093 * - IA32_MTRR_PHYSMASKn.V = 0 6094 * All other bits are undefined. For simplification, zero it all. 
6095 */ 6096 env->mtrr_deftype = 0; 6097 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6098 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6099 6100 env->interrupt_injected = -1; 6101 env->exception_nr = -1; 6102 env->exception_pending = 0; 6103 env->exception_injected = 0; 6104 env->exception_has_payload = false; 6105 env->exception_payload = 0; 6106 env->nmi_injected = false; 6107 #if !defined(CONFIG_USER_ONLY) 6108 /* We hard-wire the BSP to the first CPU. */ 6109 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6110 6111 s->halted = !cpu_is_bsp(cpu); 6112 6113 if (kvm_enabled()) { 6114 kvm_arch_reset_vcpu(cpu); 6115 } 6116 else if (hvf_enabled()) { 6117 hvf_reset_vcpu(s); 6118 } 6119 #endif 6120 } 6121 6122 #ifndef CONFIG_USER_ONLY 6123 bool cpu_is_bsp(X86CPU *cpu) 6124 { 6125 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6126 } 6127 6128 /* TODO: remove me, when reset over QOM tree is implemented */ 6129 static void x86_cpu_machine_reset_cb(void *opaque) 6130 { 6131 X86CPU *cpu = opaque; 6132 cpu_reset(CPU(cpu)); 6133 } 6134 #endif 6135 6136 static void mce_init(X86CPU *cpu) 6137 { 6138 CPUX86State *cenv = &cpu->env; 6139 unsigned int bank; 6140 6141 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6142 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6143 (CPUID_MCE | CPUID_MCA)) { 6144 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6145 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6146 cenv->mcg_ctl = ~(uint64_t)0; 6147 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6148 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6149 } 6150 } 6151 } 6152 6153 #ifndef CONFIG_USER_ONLY 6154 APICCommonClass *apic_get_class(void) 6155 { 6156 const char *apic_type = "apic"; 6157 6158 /* TODO: in-kernel irqchip for hvf */ 6159 if (kvm_apic_in_kernel()) { 6160 apic_type = "kvm-apic"; 6161 } else if (xen_enabled()) { 6162 apic_type = "xen-apic"; 6163 } 6164 6165 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6166 } 6167 6168 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6169 { 6170 APICCommonState *apic; 6171 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6172 6173 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6174 6175 object_property_add_child(OBJECT(cpu), "lapic", 6176 OBJECT(cpu->apic_state), &error_abort); 6177 object_unref(OBJECT(cpu->apic_state)); 6178 6179 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6180 /* TODO: convert to link<> */ 6181 apic = APIC_COMMON(cpu->apic_state); 6182 apic->cpu = cpu; 6183 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6184 } 6185 6186 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6187 { 6188 APICCommonState *apic; 6189 static bool apic_mmio_map_once; 6190 6191 if (cpu->apic_state == NULL) { 6192 return; 6193 } 6194 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized", 6195 errp); 6196 6197 /* Map APIC MMIO area */ 6198 apic = APIC_COMMON(cpu->apic_state); 6199 if (!apic_mmio_map_once) { 6200 memory_region_add_subregion_overlap(get_system_memory(), 6201 apic->apicbase & 6202 MSR_IA32_APICBASE_BASE, 6203 &apic->io_memory, 6204 0x1000); 6205 apic_mmio_map_once = true; 6206 } 6207 } 6208 6209 static void x86_cpu_machine_done(Notifier *n, void *unused) 6210 { 6211 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6212 MemoryRegion *smram = 6213 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6214 6215 if (smram) { 6216 cpu->smram = g_new(MemoryRegion, 1); 6217 memory_region_init_alias(cpu->smram, OBJECT(cpu), 
"smram", 6218 smram, 0, 1ull << 32); 6219 memory_region_set_enabled(cpu->smram, true); 6220 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6221 } 6222 } 6223 #else 6224 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6225 { 6226 } 6227 #endif 6228 6229 /* Note: Only safe for use on x86(-64) hosts */ 6230 static uint32_t x86_host_phys_bits(void) 6231 { 6232 uint32_t eax; 6233 uint32_t host_phys_bits; 6234 6235 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6236 if (eax >= 0x80000008) { 6237 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6238 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6239 * at 23:16 that can specify a maximum physical address bits for 6240 * the guest that can override this value; but I've not seen 6241 * anything with that set. 6242 */ 6243 host_phys_bits = eax & 0xff; 6244 } else { 6245 /* It's an odd 64 bit machine that doesn't have the leaf for 6246 * physical address bits; fall back to 36 that's most older 6247 * Intel. 6248 */ 6249 host_phys_bits = 36; 6250 } 6251 6252 return host_phys_bits; 6253 } 6254 6255 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6256 { 6257 if (*min < value) { 6258 *min = value; 6259 } 6260 } 6261 6262 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6263 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6264 { 6265 CPUX86State *env = &cpu->env; 6266 FeatureWordInfo *fi = &feature_word_info[w]; 6267 uint32_t eax = fi->cpuid.eax; 6268 uint32_t region = eax & 0xF0000000; 6269 6270 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6271 if (!env->features[w]) { 6272 return; 6273 } 6274 6275 switch (region) { 6276 case 0x00000000: 6277 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6278 break; 6279 case 0x80000000: 6280 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6281 break; 6282 case 0xC0000000: 6283 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6284 break; 6285 } 6286 6287 if (eax == 7) { 6288 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6289 fi->cpuid.ecx); 6290 } 6291 } 6292 6293 /* Calculate XSAVE components based on the configured CPU feature flags */ 6294 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6295 { 6296 CPUX86State *env = &cpu->env; 6297 int i; 6298 uint64_t mask; 6299 6300 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6301 return; 6302 } 6303 6304 mask = 0; 6305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6306 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6307 if (env->features[esa->feature] & esa->bits) { 6308 mask |= (1ULL << i); 6309 } 6310 } 6311 6312 env->features[FEAT_XSAVE_COMP_LO] = mask; 6313 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6314 } 6315 6316 /***** Steps involved on loading and filtering CPUID data 6317 * 6318 * When initializing and realizing a CPU object, the steps 6319 * involved in setting up CPUID data are: 6320 * 6321 * 1) Loading CPU model definition (X86CPUDefinition). This is 6322 * implemented by x86_cpu_load_model() and should be completely 6323 * transparent, as it is done automatically by instance_init. 6324 * No code should need to look at X86CPUDefinition structs 6325 * outside instance_init. 6326 * 6327 * 2) CPU expansion. This is done by realize before CPUID 6328 * filtering, and will make sure host/accelerator data is 6329 * loaded for CPU models that depend on host capabilities 6330 * (e.g. "host"). Done by x86_cpu_expand_features(). 6331 * 6332 * 3) CPUID filtering. 
This initializes extra data related to 6333 * CPUID, and checks if the host supports all capabilities 6334 * required by the CPU. Runnability of a CPU model is 6335 * determined at this step. Done by x86_cpu_filter_features(). 6336 * 6337 * Some operations don't require all steps to be performed. 6338 * More precisely: 6339 * 6340 * - CPU instance creation (instance_init) will run only CPU 6341 * model loading. CPU expansion can't run at instance_init-time 6342 * because host/accelerator data may not be available yet. 6343 * - CPU realization will perform both CPU model expansion and CPUID 6344 * filtering, and return an error in case one of them fails. 6345 * - query-cpu-definitions needs to run all 3 steps. It needs 6346 * to run CPUID filtering, as the 'unavailable-features' 6347 * field is set based on the filtering results. 6348 * - The query-cpu-model-expansion QMP command only needs to run 6349 * CPU model loading and CPU expansion. It should not filter 6350 * any CPUID data based on host capabilities. 6351 */ 6352 6353 /* Expand CPU configuration data, based on configured features 6354 * and host/accelerator capabilities when appropriate. 6355 */ 6356 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6357 { 6358 CPUX86State *env = &cpu->env; 6359 FeatureWord w; 6360 int i; 6361 GList *l; 6362 Error *local_err = NULL; 6363 6364 for (l = plus_features; l; l = l->next) { 6365 const char *prop = l->data; 6366 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 6367 if (local_err) { 6368 goto out; 6369 } 6370 } 6371 6372 for (l = minus_features; l; l = l->next) { 6373 const char *prop = l->data; 6374 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 6375 if (local_err) { 6376 goto out; 6377 } 6378 } 6379 6380 /*TODO: Now cpu->max_features doesn't overwrite features 6381 * set using QOM properties, and we can convert 6382 * plus_features & minus_features to global properties 6383 * inside x86_cpu_parse_featurestr() too. 6384 */ 6385 if (cpu->max_features) { 6386 for (w = 0; w < FEATURE_WORDS; w++) { 6387 /* Override only features that weren't set explicitly 6388 * by the user. 6389 */ 6390 env->features[w] |= 6391 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6392 ~env->user_features[w] & \ 6393 ~feature_word_info[w].no_autoenable_flags; 6394 } 6395 } 6396 6397 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6398 FeatureDep *d = &feature_dependencies[i]; 6399 if (!(env->features[d->from.index] & d->from.mask)) { 6400 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6401 6402 /* Not an error unless the dependent feature was added explicitly. 
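 * Only the bits the user enabled explicitly (tracked in env->user_features) are passed to mark_unavailable_features() below; dependent features that were only enabled implicitly are dropped silently.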
*/ 6403 mark_unavailable_features(cpu, d->to.index, 6404 unavailable_features & env->user_features[d->to.index], 6405 "This feature depends on other features that were not requested"); 6406 6407 env->user_features[d->to.index] |= unavailable_features; 6408 env->features[d->to.index] &= ~unavailable_features; 6409 } 6410 } 6411 6412 if (!kvm_enabled() || !cpu->expose_kvm) { 6413 env->features[FEAT_KVM] = 0; 6414 } 6415 6416 x86_cpu_enable_xsave_components(cpu); 6417 6418 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */ 6419 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6420 if (cpu->full_cpuid_auto_level) { 6421 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6422 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6423 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6424 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6425 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6426 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6427 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6428 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6429 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6430 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6431 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6432 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6433 6434 /* Intel Processor Trace requires CPUID[0x14] */ 6435 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6436 kvm_enabled() && cpu->intel_pt_auto_level) { 6437 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6438 } 6439 6440 /* CPU topology with multi-die support requires CPUID[0x1F] */ 6441 if (env->nr_dies > 1) { 6442 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6443 } 6444 6445 /* SVM requires CPUID[0x8000000A] */ 6446 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6447 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6448 } 6449 6450 /* SEV requires CPUID[0x8000001F] */ 6451 if (sev_enabled()) { 6452 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6453 } 6454 } 6455 6456 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6457 if (env->cpuid_level_func7 == UINT32_MAX) { 6458 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6459 } 6460 if (env->cpuid_level == UINT32_MAX) { 6461 env->cpuid_level = env->cpuid_min_level; 6462 } 6463 if (env->cpuid_xlevel == UINT32_MAX) { 6464 env->cpuid_xlevel = env->cpuid_min_xlevel; 6465 } 6466 if (env->cpuid_xlevel2 == UINT32_MAX) { 6467 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6468 } 6469 6470 out: 6471 if (local_err != NULL) { 6472 error_propagate(errp, local_err); 6473 } 6474 } 6475 6476 /* 6477 * Finishes initialization of CPUID data, filters CPU feature 6478 * words based on host availability of each feature. 6479 * 6480 * Features that are not available on the host are recorded in cpu->filtered_features. 6481 */ 6482 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6483 { 6484 CPUX86State *env = &cpu->env; 6485 FeatureWord w; 6486 const char *prefix = NULL; 6487 6488 if (verbose) { 6489 prefix = accel_uses_host_cpuid() 6490 ? 
"host doesn't support requested feature" 6491 : "TCG doesn't support requested feature"; 6492 } 6493 6494 for (w = 0; w < FEATURE_WORDS; w++) { 6495 uint64_t host_feat = 6496 x86_cpu_get_supported_feature_word(w, false); 6497 uint64_t requested_features = env->features[w]; 6498 uint64_t unavailable_features = requested_features & ~host_feat; 6499 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6500 } 6501 6502 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6503 kvm_enabled()) { 6504 KVMState *s = CPU(cpu)->kvm_state; 6505 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6506 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6507 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6508 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6509 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6510 6511 if (!eax_0 || 6512 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6513 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6514 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6515 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6516 INTEL_PT_ADDR_RANGES_NUM) || 6517 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6518 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6519 (ecx_0 & INTEL_PT_IP_LIP)) { 6520 /* 6521 * Processor Trace capabilities aren't configurable, so if the 6522 * host can't emulate the capabilities we report on 6523 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6524 */ 6525 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6526 } 6527 } 6528 } 6529 6530 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6531 { 6532 CPUState *cs = CPU(dev); 6533 X86CPU *cpu = X86_CPU(dev); 6534 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6535 CPUX86State *env = &cpu->env; 6536 Error *local_err = NULL; 6537 static bool ht_warned; 6538 6539 if (xcc->host_cpuid_required) { 6540 if (!accel_uses_host_cpuid()) { 6541 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6542 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6543 goto out; 6544 } 6545 } 6546 6547 if (cpu->max_features && accel_uses_host_cpuid()) { 6548 if (enable_cpu_pm) { 6549 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6550 &cpu->mwait.ecx, &cpu->mwait.edx); 6551 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6552 } 6553 if (kvm_enabled() && cpu->ucode_rev == 0) { 6554 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6555 MSR_IA32_UCODE_REV); 6556 } 6557 } 6558 6559 if (cpu->ucode_rev == 0) { 6560 /* The default is the same as KVM's. */ 6561 if (IS_AMD_CPU(env)) { 6562 cpu->ucode_rev = 0x01000065; 6563 } else { 6564 cpu->ucode_rev = 0x100000000ULL; 6565 } 6566 } 6567 6568 /* mwait extended info: needed for Core compatibility */ 6569 /* We always wake on interrupt even if host does not have the capability */ 6570 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6571 6572 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6573 error_setg(errp, "apic-id property was not initialized properly"); 6574 return; 6575 } 6576 6577 x86_cpu_expand_features(cpu, &local_err); 6578 if (local_err) { 6579 goto out; 6580 } 6581 6582 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6583 6584 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6585 error_setg(&local_err, 6586 accel_uses_host_cpuid() ? 
6587 "Host doesn't support requested features" : 6588 "TCG doesn't support requested features"); 6589 goto out; 6590 } 6591 6592 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6593 * CPUID[1].EDX. 6594 */ 6595 if (IS_AMD_CPU(env)) { 6596 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6597 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6598 & CPUID_EXT2_AMD_ALIASES); 6599 } 6600 6601 /* For 64bit systems think about the number of physical bits to present. 6602 * ideally this should be the same as the host; anything other than matching 6603 * the host can cause incorrect guest behaviour. 6604 * QEMU used to pick the magic value of 40 bits that corresponds to 6605 * consumer AMD devices but nothing else. 6606 */ 6607 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6608 if (accel_uses_host_cpuid()) { 6609 uint32_t host_phys_bits = x86_host_phys_bits(); 6610 static bool warned; 6611 6612 /* Print a warning if the user set it to a value that's not the 6613 * host value. 6614 */ 6615 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6616 !warned) { 6617 warn_report("Host physical bits (%u)" 6618 " does not match phys-bits property (%u)", 6619 host_phys_bits, cpu->phys_bits); 6620 warned = true; 6621 } 6622 6623 if (cpu->host_phys_bits) { 6624 /* The user asked for us to use the host physical bits */ 6625 cpu->phys_bits = host_phys_bits; 6626 if (cpu->host_phys_bits_limit && 6627 cpu->phys_bits > cpu->host_phys_bits_limit) { 6628 cpu->phys_bits = cpu->host_phys_bits_limit; 6629 } 6630 } 6631 6632 if (cpu->phys_bits && 6633 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6634 cpu->phys_bits < 32)) { 6635 error_setg(errp, "phys-bits should be between 32 and %u " 6636 " (but is %u)", 6637 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6638 return; 6639 } 6640 } else { 6641 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6642 error_setg(errp, "TCG only supports phys-bits=%u", 6643 TCG_PHYS_ADDR_BITS); 6644 return; 6645 } 6646 } 6647 /* 0 means it was not explicitly set by the user (or by machine 6648 * compat_props or by the host code above). In this case, the default 6649 * is the value used by TCG (40). 6650 */ 6651 if (cpu->phys_bits == 0) { 6652 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6653 } 6654 } else { 6655 /* For 32 bit systems don't use the user set value, but keep 6656 * phys_bits consistent with what we tell the guest. 
    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * already adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the
     * -smp inputs (sockets, cores, threads), it is still better to give users
     * a warning when the requested topology cannot be supported.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the check is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint64_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
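/*
 * For example, a (hypothetical) feature bit named "foo" in FEAT_1_ECX would
 * become a boolean QOM property "foo" on the CPU object, settable from the
 * command line with "-cpu <model>,foo=on" or "-cpu <model>,+foo".
 */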
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}

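/*
 * instance_init for TYPE_X86_CPU: registers the QOM properties shared by all
 * x86 CPU models (family/model/stepping, vendor, model-id, tsc-frequency,
 * feature words, one boolean property per named feature bit plus the legacy
 * aliases below), then loads the model defaults of the concrete subclass,
 * if any.
 */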
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
                              &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}

static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

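/*
 * Return the highest-priority interrupt type in interrupt_request that can be
 * delivered in the current state (GIF, SMM, NMI blocking, EFLAGS.IF and the
 * interrupt shadow are taken into account), or 0 if none can be taken now.
 */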
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

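/*
 * Recompute the hflags bits that are derived from the segment descriptors,
 * CR0/CR4, EFER and EFLAGS (CPL, PE/MP/EM/TS, TF/VM/IOPL, OSFXSR, LMA,
 * CS32/SS32/CS64 and ADDSEG); all other hflags bits are preserved.
 */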
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

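    /* Hyper-V enlightenments ("hv-*"), used when running Windows guests: */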
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7221 HYPERV_FEAT_REENLIGHTENMENT, 0), 7222 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7223 HYPERV_FEAT_TLBFLUSH, 0), 7224 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7225 HYPERV_FEAT_EVMCS, 0), 7226 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7227 HYPERV_FEAT_IPI, 0), 7228 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7229 HYPERV_FEAT_STIMER_DIRECT, 0), 7230 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7231 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7232 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7233 7234 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7235 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7236 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7237 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7238 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7239 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7240 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7241 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7242 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7243 UINT32_MAX), 7244 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7245 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7246 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7247 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7248 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7249 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7250 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7251 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7252 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 7253 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7254 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7255 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7256 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7257 false), 7258 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7259 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7260 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7261 true), 7262 /* 7263 * lecacy_cache defaults to true unless the CPU model provides its 7264 * own cache information (see x86_cpu_load_def()). 7265 */ 7266 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7267 7268 /* 7269 * From "Requirements for Implementing the Microsoft 7270 * Hypervisor Interface": 7271 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7272 * 7273 * "Starting with Windows Server 2012 and Windows 8, if 7274 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7275 * the hypervisor imposes no specific limit to the number of VPs. 7276 * In this case, Windows Server 2012 guest VMs may use more than 7277 * 64 VPs, up to the maximum supported number of processors applicable 7278 * to the specific Windows version being used." 
    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    device_class_set_props(dc, x86_cpu_properties);

    device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

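/*
 * Register the abstract TYPE_X86_CPU base type, one type per built-in CPU
 * model definition, and the special "max", "base" and (with KVM or HVF
 * support) "host" CPU types.
 */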
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)