1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/reset.h" 30 #include "sysemu/hvf.h" 31 #include "sysemu/cpus.h" 32 #include "kvm_i386.h" 33 #include "sev_i386.h" 34 35 #include "qemu/error-report.h" 36 #include "qemu/module.h" 37 #include "qemu/option.h" 38 #include "qemu/config-file.h" 39 #include "qapi/error.h" 40 #include "qapi/qapi-visit-machine.h" 41 #include "qapi/qapi-visit-run-state.h" 42 #include "qapi/qmp/qdict.h" 43 #include "qapi/qmp/qerror.h" 44 #include "qapi/visitor.h" 45 #include "qom/qom-qobject.h" 46 #include "sysemu/arch_init.h" 47 #include "qapi/qapi-commands-machine-target.h" 48 49 #include "standard-headers/asm-x86/kvm_para.h" 50 51 #include "sysemu/sysemu.h" 52 #include "sysemu/tcg.h" 53 #include "hw/qdev-properties.h" 54 #include "hw/i386/topology.h" 55 #ifndef CONFIG_USER_ONLY 56 #include "exec/address-spaces.h" 57 #include "hw/xen/xen.h" 58 #include "hw/i386/apic_internal.h" 59 #include "hw/boards.h" 60 #endif 61 62 #include "disas/capstone.h" 63 64 /* Helpers for building CPUID[2] descriptors: */ 65 66 struct CPUID2CacheDescriptorInfo { 67 enum CacheType type; 68 int level; 69 int size; 70 int line_size; 71 int associativity; 72 }; 73 74 /* 75 * Known CPUID 2 cache descriptors. 
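 * Each one-byte descriptor stands for one fixed cache (or TLB) configuration;
 * e.g. 0x2C in the table below means a 32 KiB, 8-way, 64-byte-line L1 data
 * cache.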
76 * From Intel SDM Volume 2A, CPUID instruction 77 */ 78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 82 .associativity = 4, .line_size = 32, }, 83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 84 .associativity = 4, .line_size = 64, }, 85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 86 .associativity = 2, .line_size = 32, }, 87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 32, }, 89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 90 .associativity = 4, .line_size = 64, }, 91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 92 .associativity = 6, .line_size = 64, }, 93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 94 .associativity = 2, .line_size = 64, }, 95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 96 .associativity = 8, .line_size = 64, }, 97 /* lines per sector is not supported cpuid2_cache_descriptor(), 98 * so descriptors 0x22, 0x23 are not included 99 */ 100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 101 .associativity = 16, .line_size = 64, }, 102 /* lines per sector is not supported cpuid2_cache_descriptor(), 103 * so descriptors 0x25, 0x20 are not included 104 */ 105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 108 .associativity = 8, .line_size = 64, }, 109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 110 .associativity = 4, .line_size = 32, }, 111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 112 .associativity = 4, .line_size = 32, }, 113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 118 .associativity = 4, .line_size = 32, }, 119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 120 .associativity = 4, .line_size = 64, }, 121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 122 .associativity = 8, .line_size = 64, }, 123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 129 .associativity = 16, .line_size = 64, }, 130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 131 .associativity = 12, .line_size = 64, }, 132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 133 .associativity = 16, .line_size = 64, }, 134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 135 .associativity = 24, .line_size = 64, }, 136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 137 .associativity = 8, .line_size = 64, }, 138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 141 .associativity = 4, .line_size = 64, }, 
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 143 .associativity = 4, .line_size = 64, }, 144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 145 .associativity = 4, .line_size = 64, }, 146 /* lines per sector is not supported cpuid2_cache_descriptor(), 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 148 */ 149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 150 .associativity = 8, .line_size = 64, }, 151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 2, .line_size = 64, }, 153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 154 .associativity = 8, .line_size = 64, }, 155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 162 .associativity = 8, .line_size = 32, }, 163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 164 .associativity = 4, .line_size = 64, }, 165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 166 .associativity = 8, .line_size = 64, }, 167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 172 .associativity = 4, .line_size = 64, }, 173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 178 .associativity = 8, .line_size = 64, }, 179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 182 .associativity = 12, .line_size = 64, }, 183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 184 .associativity = 12, .line_size = 64, }, 185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 190 .associativity = 16, .line_size = 64, }, 191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 196 .associativity = 24, .line_size = 64, }, 197 }; 198 199 /* 200 * "CPUID leaf 2 does not report cache descriptor information, 201 * use CPUID leaf 4 to query cache parameters" 202 */ 203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 204 205 /* 206 * Return a CPUID 2 cache descriptor for a given cache. 
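 * Matching is exact on level, type, size, line size and associativity; e.g.
 * the legacy 2 MiB, 8-way, 64-byte-line unified L2 (legacy_l2_cache_cpuid2
 * below) maps to descriptor 0x7D.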
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 208 */ 209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 210 { 211 int i; 212 213 assert(cache->size > 0); 214 assert(cache->level > 0); 215 assert(cache->line_size > 0); 216 assert(cache->associativity > 0); 217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 219 if (d->level == cache->level && d->type == cache->type && 220 d->size == cache->size && d->line_size == cache->line_size && 221 d->associativity == cache->associativity) { 222 return i; 223 } 224 } 225 226 return CACHE_DESCRIPTOR_UNAVAILABLE; 227 } 228 229 /* CPUID Leaf 4 constants: */ 230 231 /* EAX: */ 232 #define CACHE_TYPE_D 1 233 #define CACHE_TYPE_I 2 234 #define CACHE_TYPE_UNIFIED 3 235 236 #define CACHE_LEVEL(l) (l << 5) 237 238 #define CACHE_SELF_INIT_LEVEL (1 << 8) 239 240 /* EDX: */ 241 #define CACHE_NO_INVD_SHARING (1 << 0) 242 #define CACHE_INCLUSIVE (1 << 1) 243 #define CACHE_COMPLEX_IDX (1 << 2) 244 245 /* Encode CacheType for CPUID[4].EAX */ 246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 249 0 /* Invalid value */) 250 251 252 /* Encode cache info for CPUID[4] */ 253 static void encode_cache_cpuid4(CPUCacheInfo *cache, 254 int num_apic_ids, int num_cores, 255 uint32_t *eax, uint32_t *ebx, 256 uint32_t *ecx, uint32_t *edx) 257 { 258 assert(cache->size == cache->line_size * cache->associativity * 259 cache->partitions * cache->sets); 260 261 assert(num_apic_ids > 0); 262 *eax = CACHE_TYPE(cache->type) | 263 CACHE_LEVEL(cache->level) | 264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 265 ((num_cores - 1) << 26) | 266 ((num_apic_ids - 1) << 14); 267 268 assert(cache->line_size > 0); 269 assert(cache->partitions > 0); 270 assert(cache->associativity > 0); 271 /* We don't implement fully-associative caches */ 272 assert(cache->associativity < cache->sets); 273 *ebx = (cache->line_size - 1) | 274 ((cache->partitions - 1) << 12) | 275 ((cache->associativity - 1) << 22); 276 277 assert(cache->sets > 0); 278 *ecx = cache->sets - 1; 279 280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 281 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 283 } 284 285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 287 { 288 assert(cache->size % 1024 == 0); 289 assert(cache->lines_per_tag > 0); 290 assert(cache->associativity > 0); 291 assert(cache->line_size > 0); 292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 293 (cache->lines_per_tag << 8) | (cache->line_size); 294 } 295 296 #define ASSOC_FULL 0xFF 297 298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 300 a == 2 ? 0x2 : \ 301 a == 4 ? 0x4 : \ 302 a == 8 ? 0x6 : \ 303 a == 16 ? 0x8 : \ 304 a == 32 ? 0xA : \ 305 a == 48 ? 0xB : \ 306 a == 64 ? 0xC : \ 307 a == 96 ? 0xD : \ 308 a == 128 ? 0xE : \ 309 a == ASSOC_FULL ? 0xF : \ 310 0 /* invalid value */) 311 312 /* 313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 314 * @l3 can be NULL. 
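 *
 * A worked example, using the values of legacy_l2_cache_amd below
 * (512 KiB, 16-way, 64-byte lines, lines_per_tag = 1):
 *
 *     ECX = (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64
 *         = 0x02000000 | 0x8000 | 0x100 | 0x40
 *         = 0x02008140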
315 */ 316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 317 CPUCacheInfo *l3, 318 uint32_t *ecx, uint32_t *edx) 319 { 320 assert(l2->size % 1024 == 0); 321 assert(l2->associativity > 0); 322 assert(l2->lines_per_tag > 0); 323 assert(l2->line_size > 0); 324 *ecx = ((l2->size / 1024) << 16) | 325 (AMD_ENC_ASSOC(l2->associativity) << 12) | 326 (l2->lines_per_tag << 8) | (l2->line_size); 327 328 if (l3) { 329 assert(l3->size % (512 * 1024) == 0); 330 assert(l3->associativity > 0); 331 assert(l3->lines_per_tag > 0); 332 assert(l3->line_size > 0); 333 *edx = ((l3->size / (512 * 1024)) << 18) | 334 (AMD_ENC_ASSOC(l3->associativity) << 12) | 335 (l3->lines_per_tag << 8) | (l3->line_size); 336 } else { 337 *edx = 0; 338 } 339 } 340 341 /* 342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E 343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. 344 * Define the constants to build the cpu topology. Right now, TOPOEXT 345 * feature is enabled only on EPYC. So, these constants are based on 346 * EPYC supported configurations. We may need to handle the cases if 347 * these values change in future. 348 */ 349 /* Maximum core complexes in a node */ 350 #define MAX_CCX 2 351 /* Maximum cores in a core complex */ 352 #define MAX_CORES_IN_CCX 4 353 /* Maximum cores in a node */ 354 #define MAX_CORES_IN_NODE 8 355 /* Maximum nodes in a socket */ 356 #define MAX_NODES_PER_SOCKET 4 357 358 /* 359 * Figure out the number of nodes required to build this config. 360 * Max cores in a node is 8 361 */ 362 static int nodes_in_socket(int nr_cores) 363 { 364 int nodes; 365 366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); 367 368 /* Hardware does not support config with 3 nodes, return 4 in that case */ 369 return (nodes == 3) ? 4 : nodes; 370 } 371 372 /* 373 * Decide the number of cores in a core complex with the given nr_cores using 374 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and 375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible 376 * L3 cache is shared across all cores in a core complex. So, this will also 377 * tell us how many cores are sharing the L3 cache. 378 */ 379 static int cores_in_core_complex(int nr_cores) 380 { 381 int nodes; 382 383 /* Check if we can fit all the cores in one core complex */ 384 if (nr_cores <= MAX_CORES_IN_CCX) { 385 return nr_cores; 386 } 387 /* Get the number of nodes required to build this config */ 388 nodes = nodes_in_socket(nr_cores); 389 390 /* 391 * Divide the cores accros all the core complexes 392 * Return rounded up value 393 */ 394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); 395 } 396 397 /* Encode cache info for CPUID[8000001D] */ 398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs, 399 uint32_t *eax, uint32_t *ebx, 400 uint32_t *ecx, uint32_t *edx) 401 { 402 uint32_t l3_cores; 403 assert(cache->size == cache->line_size * cache->associativity * 404 cache->partitions * cache->sets); 405 406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 407 (cache->self_init ? 
                CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology.
     * This can be 0, 1, 2 or 3, with at most 4 cores in a core complex.
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};

/*
 * Build the configuration to closely match the EPYC hardware. We use the
 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
 * MAX_CORES_IN_NODE) right now; this could change in the future.
 * nr_cores : Total number of cores in the config
 * core_id  : Core index of the current CPU
 * topo     : Data structure to hold all the config info for this core index
 */
static void build_core_topology(int nr_cores, int core_id,
                                struct core_topology *topo)
{
    int nodes, cores_in_ccx;

    /* First get the number of nodes required */
    nodes = nodes_in_socket(nr_cores);

    cores_in_ccx = cores_in_core_complex(nr_cores);

    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
    topo->core_id = core_id % cores_in_ccx;
    topo->num_nodes = nodes;
}

/* Encode cache info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes.
         * But with more than 32 cores, we may end up with more than 4 nodes.
         * The node id is a combination of socket id and node id. The only
         * requirement here is that this number should be unique across the
         * system. Shift the socket id to accommodate more nodes. We don't
         * expect both socket id and node id to be big numbers at the same
         * time. This is not an ideal config but we need to support it.
         * The maximum number of nodes we can have is 32 (255/8) with 8 cores
         * per node and 255 max cores. We only need 5 bits for nodes. Find the
         * left-most set bit to represent the total number of nodes.
         * find_last_bit returns the last set bit (0-based). Left-shift (+1)
         * the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}

/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
}; 647 648 /* TLB definitions: */ 649 650 #define L1_DTLB_2M_ASSOC 1 651 #define L1_DTLB_2M_ENTRIES 255 652 #define L1_DTLB_4K_ASSOC 1 653 #define L1_DTLB_4K_ENTRIES 255 654 655 #define L1_ITLB_2M_ASSOC 1 656 #define L1_ITLB_2M_ENTRIES 255 657 #define L1_ITLB_4K_ASSOC 1 658 #define L1_ITLB_4K_ENTRIES 255 659 660 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 662 #define L2_DTLB_4K_ASSOC 4 663 #define L2_DTLB_4K_ENTRIES 512 664 665 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 667 #define L2_ITLB_4K_ASSOC 4 668 #define L2_ITLB_4K_ENTRIES 512 669 670 /* CPUID Leaf 0x14 constants: */ 671 #define INTEL_PT_MAX_SUBLEAF 0x1 672 /* 673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 674 * MSR can be accessed; 675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 677 * of Intel PT MSRs across warm reset; 678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 679 */ 680 #define INTEL_PT_MINIMAL_EBX 0xf 681 /* 682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 684 * accessed; 685 * bit[01]: ToPA tables can hold any number of output entries, up to the 686 * maximum allowed by the MaskOrTableOffset field of 687 * IA32_RTIT_OUTPUT_MASK_PTRS; 688 * bit[02]: Support Single-Range Output scheme; 689 */ 690 #define INTEL_PT_MINIMAL_ECX 0x7 691 /* generated packets which contain IP payloads have LIP values */ 692 #define INTEL_PT_IP_LIP (1 << 31) 693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 698 699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 700 uint32_t vendor2, uint32_t vendor3) 701 { 702 int i; 703 for (i = 0; i < 4; i++) { 704 dst[i] = vendor1 >> (8 * i); 705 dst[i + 4] = vendor2 >> (8 * i); 706 dst[i + 8] = vendor3 >> (8 * i); 707 } 708 dst[CPUID_VENDOR_SZ] = '\0'; 709 } 710 711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 716 CPUID_PSE36 | CPUID_FXSR) 717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 721 CPUID_PAE | CPUID_SEP | CPUID_APIC) 722 723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 728 /* partly implemented: 729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 730 /* missing: 731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 732 #define 
TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 737 CPUID_EXT_RDRAND) 738 /* missing: 739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 743 CPUID_EXT_F16C */ 744 745 #ifdef TARGET_X86_64 746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 747 #else 748 #define TCG_EXT2_X86_64_FEATURES 0 749 #endif 750 751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 754 TCG_EXT2_X86_64_FEATURES) 755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 757 #define TCG_EXT4_FEATURES 0 758 #define TCG_SVM_FEATURES CPUID_SVM_NPT 759 #define TCG_KVM_FEATURES 0 760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 764 CPUID_7_0_EBX_ERMS) 765 /* missing: 766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 768 CPUID_7_0_EBX_RDSEED */ 769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 771 CPUID_7_0_ECX_LA57) 772 #define TCG_7_0_EDX_FEATURES 0 773 #define TCG_7_1_EAX_FEATURES 0 774 #define TCG_APM_FEATURES 0 775 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 776 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 777 /* missing: 778 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 779 780 typedef enum FeatureWordType { 781 CPUID_FEATURE_WORD, 782 MSR_FEATURE_WORD, 783 } FeatureWordType; 784 785 typedef struct FeatureWordInfo { 786 FeatureWordType type; 787 /* feature flags names are taken from "Intel Processor Identification and 788 * the CPUID Instruction" and AMD's "CPUID Specification". 789 * In cases of disagreement between feature naming conventions, 790 * aliases may be added. 
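 * Entries that are NULL have no user-visible flag name and therefore cannot
 * be set or cleared by name on the command line.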
791 */ 792 const char *feat_names[64]; 793 union { 794 /* If type==CPUID_FEATURE_WORD */ 795 struct { 796 uint32_t eax; /* Input EAX for CPUID */ 797 bool needs_ecx; /* CPUID instruction uses ECX as input */ 798 uint32_t ecx; /* Input ECX value for CPUID */ 799 int reg; /* output register (R_* constant) */ 800 } cpuid; 801 /* If type==MSR_FEATURE_WORD */ 802 struct { 803 uint32_t index; 804 } msr; 805 }; 806 uint64_t tcg_features; /* Feature flags supported by TCG */ 807 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ 808 uint64_t migratable_flags; /* Feature flags known to be migratable */ 809 /* Features that shouldn't be auto-enabled by "-cpu host" */ 810 uint64_t no_autoenable_flags; 811 } FeatureWordInfo; 812 813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 814 [FEAT_1_EDX] = { 815 .type = CPUID_FEATURE_WORD, 816 .feat_names = { 817 "fpu", "vme", "de", "pse", 818 "tsc", "msr", "pae", "mce", 819 "cx8", "apic", NULL, "sep", 820 "mtrr", "pge", "mca", "cmov", 821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 822 NULL, "ds" /* Intel dts */, "acpi", "mmx", 823 "fxsr", "sse", "sse2", "ss", 824 "ht" /* Intel htt */, "tm", "ia64", "pbe", 825 }, 826 .cpuid = {.eax = 1, .reg = R_EDX, }, 827 .tcg_features = TCG_FEATURES, 828 }, 829 [FEAT_1_ECX] = { 830 .type = CPUID_FEATURE_WORD, 831 .feat_names = { 832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 833 "ds-cpl", "vmx", "smx", "est", 834 "tm2", "ssse3", "cid", NULL, 835 "fma", "cx16", "xtpr", "pdcm", 836 NULL, "pcid", "dca", "sse4.1", 837 "sse4.2", "x2apic", "movbe", "popcnt", 838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 839 "avx", "f16c", "rdrand", "hypervisor", 840 }, 841 .cpuid = { .eax = 1, .reg = R_ECX, }, 842 .tcg_features = TCG_EXT_FEATURES, 843 }, 844 /* Feature names that are already defined on feature_name[] but 845 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 846 * names on feat_names below. They are copied automatically 847 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 848 */ 849 [FEAT_8000_0001_EDX] = { 850 .type = CPUID_FEATURE_WORD, 851 .feat_names = { 852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 857 "nx", NULL, "mmxext", NULL /* mmx */, 858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 859 NULL, "lm", "3dnowext", "3dnow", 860 }, 861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 862 .tcg_features = TCG_EXT2_FEATURES, 863 }, 864 [FEAT_8000_0001_ECX] = { 865 .type = CPUID_FEATURE_WORD, 866 .feat_names = { 867 "lahf-lm", "cmp-legacy", "svm", "extapic", 868 "cr8legacy", "abm", "sse4a", "misalignsse", 869 "3dnowprefetch", "osvw", "ibs", "xop", 870 "skinit", "wdt", NULL, "lwp", 871 "fma4", "tce", NULL, "nodeid-msr", 872 NULL, "tbm", "topoext", "perfctr-core", 873 "perfctr-nb", NULL, NULL, NULL, 874 NULL, NULL, NULL, NULL, 875 }, 876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 877 .tcg_features = TCG_EXT3_FEATURES, 878 /* 879 * TOPOEXT is always allowed but can't be enabled blindly by 880 * "-cpu host", as it requires consistent cache topology info 881 * to be provided so it doesn't confuse guests. 
882 */ 883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 884 }, 885 [FEAT_C000_0001_EDX] = { 886 .type = CPUID_FEATURE_WORD, 887 .feat_names = { 888 NULL, NULL, "xstore", "xstore-en", 889 NULL, NULL, "xcrypt", "xcrypt-en", 890 "ace2", "ace2-en", "phe", "phe-en", 891 "pmm", "pmm-en", NULL, NULL, 892 NULL, NULL, NULL, NULL, 893 NULL, NULL, NULL, NULL, 894 NULL, NULL, NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 }, 897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 898 .tcg_features = TCG_EXT4_FEATURES, 899 }, 900 [FEAT_KVM] = { 901 .type = CPUID_FEATURE_WORD, 902 .feat_names = { 903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL, 907 NULL, NULL, NULL, NULL, 908 NULL, NULL, NULL, NULL, 909 "kvmclock-stable-bit", NULL, NULL, NULL, 910 NULL, NULL, NULL, NULL, 911 }, 912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 913 .tcg_features = TCG_KVM_FEATURES, 914 }, 915 [FEAT_KVM_HINTS] = { 916 .type = CPUID_FEATURE_WORD, 917 .feat_names = { 918 "kvm-hint-dedicated", NULL, NULL, NULL, 919 NULL, NULL, NULL, NULL, 920 NULL, NULL, NULL, NULL, 921 NULL, NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 NULL, NULL, NULL, NULL, 924 NULL, NULL, NULL, NULL, 925 NULL, NULL, NULL, NULL, 926 }, 927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 928 .tcg_features = TCG_KVM_FEATURES, 929 /* 930 * KVM hints aren't auto-enabled by -cpu host, they need to be 931 * explicitly enabled in the command-line. 932 */ 933 .no_autoenable_flags = ~0U, 934 }, 935 /* 936 * .feat_names are commented out for Hyper-V enlightenments because we 937 * don't want to have two different ways for enabling them on QEMU command 938 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require 939 * enabling several feature bits simultaneously, exposing these bits 940 * individually may just confuse guests. 
941 */ 942 [FEAT_HYPERV_EAX] = { 943 .type = CPUID_FEATURE_WORD, 944 .feat_names = { 945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 952 NULL, NULL, 953 NULL, NULL, NULL, NULL, 954 NULL, NULL, NULL, NULL, 955 NULL, NULL, NULL, NULL, 956 NULL, NULL, NULL, NULL, 957 }, 958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 959 }, 960 [FEAT_HYPERV_EBX] = { 961 .type = CPUID_FEATURE_WORD, 962 .feat_names = { 963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 965 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 966 NULL /* hv_create_port */, NULL /* hv_connect_port */, 967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 969 NULL, NULL, 970 NULL, NULL, NULL, NULL, 971 NULL, NULL, NULL, NULL, 972 NULL, NULL, NULL, NULL, 973 NULL, NULL, NULL, NULL, 974 }, 975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 976 }, 977 [FEAT_HYPERV_EDX] = { 978 .type = CPUID_FEATURE_WORD, 979 .feat_names = { 980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 983 NULL, NULL, 984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 985 NULL, NULL, NULL, NULL, 986 NULL, NULL, NULL, NULL, 987 NULL, NULL, NULL, NULL, 988 NULL, NULL, NULL, NULL, 989 NULL, NULL, NULL, NULL, 990 }, 991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 992 }, 993 [FEAT_HV_RECOMM_EAX] = { 994 .type = CPUID_FEATURE_WORD, 995 .feat_names = { 996 NULL /* hv_recommend_pv_as_switch */, 997 NULL /* hv_recommend_pv_tlbflush_local */, 998 NULL /* hv_recommend_pv_tlbflush_remote */, 999 NULL /* hv_recommend_msr_apic_access */, 1000 NULL /* hv_recommend_msr_reset */, 1001 NULL /* hv_recommend_relaxed_timing */, 1002 NULL /* hv_recommend_dma_remapping */, 1003 NULL /* hv_recommend_int_remapping */, 1004 NULL /* hv_recommend_x2apic_msrs */, 1005 NULL /* hv_recommend_autoeoi_deprecation */, 1006 NULL /* hv_recommend_pv_ipi */, 1007 NULL /* hv_recommend_ex_hypercalls */, 1008 NULL /* hv_hypervisor_is_nested */, 1009 NULL /* hv_recommend_int_mbec */, 1010 NULL /* hv_recommend_evmcs */, 1011 NULL, 1012 NULL, NULL, NULL, NULL, 1013 NULL, NULL, NULL, NULL, 1014 NULL, NULL, NULL, NULL, 1015 NULL, NULL, NULL, NULL, 1016 }, 1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 1018 }, 1019 [FEAT_HV_NESTED_EAX] = { 1020 .type = CPUID_FEATURE_WORD, 1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 1022 }, 1023 [FEAT_SVM] = { 1024 .type = CPUID_FEATURE_WORD, 1025 .feat_names = { 1026 "npt", "lbrv", "svm-lock", "nrip-save", 1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 1028 NULL, NULL, "pause-filter", NULL, 1029 "pfthreshold", NULL, NULL, NULL, 1030 NULL, NULL, NULL, NULL, 1031 NULL, NULL, NULL, NULL, 1032 NULL, NULL, NULL, NULL, 1033 NULL, NULL, NULL, NULL, 1034 }, 1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 1036 .tcg_features = TCG_SVM_FEATURES, 1037 }, 1038 [FEAT_7_0_EBX] = 
{ 1039 .type = CPUID_FEATURE_WORD, 1040 .feat_names = { 1041 "fsgsbase", "tsc-adjust", NULL, "bmi1", 1042 "hle", "avx2", NULL, "smep", 1043 "bmi2", "erms", "invpcid", "rtm", 1044 NULL, NULL, "mpx", NULL, 1045 "avx512f", "avx512dq", "rdseed", "adx", 1046 "smap", "avx512ifma", "pcommit", "clflushopt", 1047 "clwb", "intel-pt", "avx512pf", "avx512er", 1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 1049 }, 1050 .cpuid = { 1051 .eax = 7, 1052 .needs_ecx = true, .ecx = 0, 1053 .reg = R_EBX, 1054 }, 1055 .tcg_features = TCG_7_0_EBX_FEATURES, 1056 }, 1057 [FEAT_7_0_ECX] = { 1058 .type = CPUID_FEATURE_WORD, 1059 .feat_names = { 1060 NULL, "avx512vbmi", "umip", "pku", 1061 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, 1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 1064 "la57", NULL, NULL, NULL, 1065 NULL, NULL, "rdpid", NULL, 1066 NULL, "cldemote", NULL, "movdiri", 1067 "movdir64b", NULL, NULL, NULL, 1068 }, 1069 .cpuid = { 1070 .eax = 7, 1071 .needs_ecx = true, .ecx = 0, 1072 .reg = R_ECX, 1073 }, 1074 .tcg_features = TCG_7_0_ECX_FEATURES, 1075 }, 1076 [FEAT_7_0_EDX] = { 1077 .type = CPUID_FEATURE_WORD, 1078 .feat_names = { 1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 1080 NULL, NULL, NULL, NULL, 1081 NULL, NULL, "md-clear", NULL, 1082 NULL, NULL, NULL, NULL, 1083 NULL, NULL, NULL /* pconfig */, NULL, 1084 NULL, NULL, NULL, NULL, 1085 NULL, NULL, "spec-ctrl", "stibp", 1086 NULL, "arch-capabilities", "core-capability", "ssbd", 1087 }, 1088 .cpuid = { 1089 .eax = 7, 1090 .needs_ecx = true, .ecx = 0, 1091 .reg = R_EDX, 1092 }, 1093 .tcg_features = TCG_7_0_EDX_FEATURES, 1094 }, 1095 [FEAT_7_1_EAX] = { 1096 .type = CPUID_FEATURE_WORD, 1097 .feat_names = { 1098 NULL, NULL, NULL, NULL, 1099 NULL, "avx512-bf16", NULL, NULL, 1100 NULL, NULL, NULL, NULL, 1101 NULL, NULL, NULL, NULL, 1102 NULL, NULL, NULL, NULL, 1103 NULL, NULL, NULL, NULL, 1104 NULL, NULL, NULL, NULL, 1105 NULL, NULL, NULL, NULL, 1106 }, 1107 .cpuid = { 1108 .eax = 7, 1109 .needs_ecx = true, .ecx = 1, 1110 .reg = R_EAX, 1111 }, 1112 .tcg_features = TCG_7_1_EAX_FEATURES, 1113 }, 1114 [FEAT_8000_0007_EDX] = { 1115 .type = CPUID_FEATURE_WORD, 1116 .feat_names = { 1117 NULL, NULL, NULL, NULL, 1118 NULL, NULL, NULL, NULL, 1119 "invtsc", NULL, NULL, NULL, 1120 NULL, NULL, NULL, NULL, 1121 NULL, NULL, NULL, NULL, 1122 NULL, NULL, NULL, NULL, 1123 NULL, NULL, NULL, NULL, 1124 NULL, NULL, NULL, NULL, 1125 }, 1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1127 .tcg_features = TCG_APM_FEATURES, 1128 .unmigratable_flags = CPUID_APM_INVTSC, 1129 }, 1130 [FEAT_8000_0008_EBX] = { 1131 .type = CPUID_FEATURE_WORD, 1132 .feat_names = { 1133 "clzero", NULL, "xsaveerptr", NULL, 1134 NULL, NULL, NULL, NULL, 1135 NULL, "wbnoinvd", NULL, NULL, 1136 "ibpb", NULL, NULL, NULL, 1137 NULL, NULL, NULL, NULL, 1138 NULL, NULL, NULL, NULL, 1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1140 NULL, NULL, NULL, NULL, 1141 }, 1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1143 .tcg_features = 0, 1144 .unmigratable_flags = 0, 1145 }, 1146 [FEAT_XSAVE] = { 1147 .type = CPUID_FEATURE_WORD, 1148 .feat_names = { 1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1150 NULL, NULL, NULL, NULL, 1151 NULL, NULL, NULL, NULL, 1152 NULL, NULL, NULL, NULL, 1153 NULL, NULL, NULL, NULL, 1154 NULL, NULL, NULL, NULL, 1155 NULL, NULL, NULL, NULL, 1156 NULL, NULL, NULL, NULL, 1157 }, 1158 .cpuid = { 1159 .eax = 0xd, 1160 .needs_ecx = true, .ecx = 1, 1161 .reg = R_EAX, 1162 }, 1163 .tcg_features = TCG_XSAVE_FEATURES, 
1164 }, 1165 [FEAT_6_EAX] = { 1166 .type = CPUID_FEATURE_WORD, 1167 .feat_names = { 1168 NULL, NULL, "arat", NULL, 1169 NULL, NULL, NULL, NULL, 1170 NULL, NULL, NULL, NULL, 1171 NULL, NULL, NULL, NULL, 1172 NULL, NULL, NULL, NULL, 1173 NULL, NULL, NULL, NULL, 1174 NULL, NULL, NULL, NULL, 1175 NULL, NULL, NULL, NULL, 1176 }, 1177 .cpuid = { .eax = 6, .reg = R_EAX, }, 1178 .tcg_features = TCG_6_EAX_FEATURES, 1179 }, 1180 [FEAT_XSAVE_COMP_LO] = { 1181 .type = CPUID_FEATURE_WORD, 1182 .cpuid = { 1183 .eax = 0xD, 1184 .needs_ecx = true, .ecx = 0, 1185 .reg = R_EAX, 1186 }, 1187 .tcg_features = ~0U, 1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1191 XSTATE_PKRU_MASK, 1192 }, 1193 [FEAT_XSAVE_COMP_HI] = { 1194 .type = CPUID_FEATURE_WORD, 1195 .cpuid = { 1196 .eax = 0xD, 1197 .needs_ecx = true, .ecx = 0, 1198 .reg = R_EDX, 1199 }, 1200 .tcg_features = ~0U, 1201 }, 1202 /*Below are MSR exposed features*/ 1203 [FEAT_ARCH_CAPABILITIES] = { 1204 .type = MSR_FEATURE_WORD, 1205 .feat_names = { 1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1207 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", 1208 "taa-no", NULL, NULL, NULL, 1209 NULL, NULL, NULL, NULL, 1210 NULL, NULL, NULL, NULL, 1211 NULL, NULL, NULL, NULL, 1212 NULL, NULL, NULL, NULL, 1213 NULL, NULL, NULL, NULL, 1214 }, 1215 .msr = { 1216 .index = MSR_IA32_ARCH_CAPABILITIES, 1217 }, 1218 }, 1219 [FEAT_CORE_CAPABILITY] = { 1220 .type = MSR_FEATURE_WORD, 1221 .feat_names = { 1222 NULL, NULL, NULL, NULL, 1223 NULL, "split-lock-detect", NULL, NULL, 1224 NULL, NULL, NULL, NULL, 1225 NULL, NULL, NULL, NULL, 1226 NULL, NULL, NULL, NULL, 1227 NULL, NULL, NULL, NULL, 1228 NULL, NULL, NULL, NULL, 1229 NULL, NULL, NULL, NULL, 1230 }, 1231 .msr = { 1232 .index = MSR_IA32_CORE_CAPABILITY, 1233 }, 1234 }, 1235 1236 [FEAT_VMX_PROCBASED_CTLS] = { 1237 .type = MSR_FEATURE_WORD, 1238 .feat_names = { 1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", 1240 NULL, NULL, NULL, "vmx-hlt-exit", 1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", 1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", 1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", 1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", 1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", 1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", 1247 }, 1248 .msr = { 1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1250 } 1251 }, 1252 1253 [FEAT_VMX_SECONDARY_CTLS] = { 1254 .type = MSR_FEATURE_WORD, 1255 .feat_names = { 1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", 1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", 1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", 1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", 1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL, 1261 "vmx-xsaves", NULL, NULL, NULL, 1262 NULL, NULL, NULL, NULL, 1263 NULL, NULL, NULL, NULL, 1264 }, 1265 .msr = { 1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2, 1267 } 1268 }, 1269 1270 [FEAT_VMX_PINBASED_CTLS] = { 1271 .type = MSR_FEATURE_WORD, 1272 .feat_names = { 1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", 1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", 1275 NULL, NULL, NULL, NULL, 1276 NULL, NULL, NULL, NULL, 1277 NULL, NULL, NULL, NULL, 1278 NULL, NULL, NULL, NULL, 1279 
NULL, NULL, NULL, NULL, 1280 NULL, NULL, NULL, NULL, 1281 }, 1282 .msr = { 1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1284 } 1285 }, 1286 1287 [FEAT_VMX_EXIT_CTLS] = { 1288 .type = MSR_FEATURE_WORD, 1289 /* 1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from 1291 * the LM CPUID bit. 1292 */ 1293 .feat_names = { 1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL, 1295 NULL, NULL, NULL, NULL, 1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, 1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", 1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", 1299 "vmx-exit-save-efer", "vmx-exit-load-efer", 1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", 1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, 1302 NULL, NULL, NULL, NULL, 1303 }, 1304 .msr = { 1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, 1306 } 1307 }, 1308 1309 [FEAT_VMX_ENTRY_CTLS] = { 1310 .type = MSR_FEATURE_WORD, 1311 .feat_names = { 1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL, 1313 NULL, NULL, NULL, NULL, 1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL, 1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", 1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, 1317 NULL, NULL, NULL, NULL, 1318 NULL, NULL, NULL, NULL, 1319 NULL, NULL, NULL, NULL, 1320 }, 1321 .msr = { 1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1323 } 1324 }, 1325 1326 [FEAT_VMX_MISC] = { 1327 .type = MSR_FEATURE_WORD, 1328 .feat_names = { 1329 NULL, NULL, NULL, NULL, 1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", 1331 "vmx-activity-wait-sipi", NULL, NULL, NULL, 1332 NULL, NULL, NULL, NULL, 1333 NULL, NULL, NULL, NULL, 1334 NULL, NULL, NULL, NULL, 1335 NULL, NULL, NULL, NULL, 1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, 1337 }, 1338 .msr = { 1339 .index = MSR_IA32_VMX_MISC, 1340 } 1341 }, 1342 1343 [FEAT_VMX_EPT_VPID_CAPS] = { 1344 .type = MSR_FEATURE_WORD, 1345 .feat_names = { 1346 "vmx-ept-execonly", NULL, NULL, NULL, 1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", 1348 NULL, NULL, NULL, NULL, 1349 NULL, NULL, NULL, NULL, 1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, 1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, 1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, 1353 NULL, NULL, NULL, NULL, 1354 "vmx-invvpid", NULL, NULL, NULL, 1355 NULL, NULL, NULL, NULL, 1356 "vmx-invvpid-single-addr", "vmx-invept-single-context", 1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", 1358 NULL, NULL, NULL, NULL, 1359 NULL, NULL, NULL, NULL, 1360 NULL, NULL, NULL, NULL, 1361 NULL, NULL, NULL, NULL, 1362 NULL, NULL, NULL, NULL, 1363 }, 1364 .msr = { 1365 .index = MSR_IA32_VMX_EPT_VPID_CAP, 1366 } 1367 }, 1368 1369 [FEAT_VMX_BASIC] = { 1370 .type = MSR_FEATURE_WORD, 1371 .feat_names = { 1372 [54] = "vmx-ins-outs", 1373 [55] = "vmx-true-ctls", 1374 }, 1375 .msr = { 1376 .index = MSR_IA32_VMX_BASIC, 1377 }, 1378 /* Just to be safe - we don't support setting the MSEG version field. 
*/ 1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, 1380 }, 1381 1382 [FEAT_VMX_VMFUNC] = { 1383 .type = MSR_FEATURE_WORD, 1384 .feat_names = { 1385 [0] = "vmx-eptp-switching", 1386 }, 1387 .msr = { 1388 .index = MSR_IA32_VMX_VMFUNC, 1389 } 1390 }, 1391 1392 }; 1393 1394 typedef struct FeatureMask { 1395 FeatureWord index; 1396 uint64_t mask; 1397 } FeatureMask; 1398 1399 typedef struct FeatureDep { 1400 FeatureMask from, to; 1401 } FeatureDep; 1402 1403 static FeatureDep feature_dependencies[] = { 1404 { 1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES }, 1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull }, 1407 }, 1408 { 1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY }, 1410 .to = { FEAT_CORE_CAPABILITY, ~0ull }, 1411 }, 1412 { 1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull }, 1415 }, 1416 { 1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull }, 1419 }, 1420 { 1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull }, 1423 }, 1424 { 1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull }, 1427 }, 1428 { 1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1430 .to = { FEAT_VMX_MISC, ~0ull }, 1431 }, 1432 { 1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1434 .to = { FEAT_VMX_BASIC, ~0ull }, 1435 }, 1436 { 1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, 1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE }, 1439 }, 1440 { 1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS }, 1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull }, 1443 }, 1444 { 1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES }, 1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES }, 1447 }, 1448 { 1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND }, 1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING }, 1451 }, 1452 { 1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, 1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, 1455 }, 1456 { 1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, 1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, 1459 }, 1460 { 1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP }, 1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP }, 1463 }, 1464 { 1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull }, 1467 }, 1468 { 1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST }, 1471 }, 1472 { 1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID }, 1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 }, 1475 }, 1476 { 1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC }, 1478 .to = { FEAT_VMX_VMFUNC, ~0ull }, 1479 }, 1480 }; 1481 1482 typedef struct X86RegisterInfo32 { 1483 /* Name of register */ 1484 const char *name; 1485 /* QAPI enum value register */ 1486 X86CPURegister32 qapi_enum; 1487 } X86RegisterInfo32; 1488 1489 #define REGISTER(reg) \ 1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1492 REGISTER(EAX), 1493 REGISTER(ECX), 1494 REGISTER(EDX), 1495 REGISTER(EBX), 1496 REGISTER(ESP), 1497 REGISTER(EBP), 1498 REGISTER(ESI), 1499 REGISTER(EDI), 1500 }; 1501 #undef REGISTER 1502 1503 typedef struct ExtSaveArea { 1504 uint32_t feature, bits; 
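    /*
     * Offset and size of the state component within the standard
     * (non-compacted) XSAVE area layout.
     */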
1505 uint32_t offset, size; 1506 } ExtSaveArea; 1507 1508 static const ExtSaveArea x86_ext_save_areas[] = { 1509 [XSTATE_FP_BIT] = { 1510 /* x87 FP state component is always enabled if XSAVE is supported */ 1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1512 /* x87 state is in the legacy region of the XSAVE area */ 1513 .offset = 0, 1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1515 }, 1516 [XSTATE_SSE_BIT] = { 1517 /* SSE state component is always enabled if XSAVE is supported */ 1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1519 /* SSE state is in the legacy region of the XSAVE area */ 1520 .offset = 0, 1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1522 }, 1523 [XSTATE_YMM_BIT] = 1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 1525 .offset = offsetof(X86XSaveArea, avx_state), 1526 .size = sizeof(XSaveAVX) }, 1527 [XSTATE_BNDREGS_BIT] = 1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1529 .offset = offsetof(X86XSaveArea, bndreg_state), 1530 .size = sizeof(XSaveBNDREG) }, 1531 [XSTATE_BNDCSR_BIT] = 1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1533 .offset = offsetof(X86XSaveArea, bndcsr_state), 1534 .size = sizeof(XSaveBNDCSR) }, 1535 [XSTATE_OPMASK_BIT] = 1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1537 .offset = offsetof(X86XSaveArea, opmask_state), 1538 .size = sizeof(XSaveOpmask) }, 1539 [XSTATE_ZMM_Hi256_BIT] = 1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 1542 .size = sizeof(XSaveZMM_Hi256) }, 1543 [XSTATE_Hi16_ZMM_BIT] = 1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 1546 .size = sizeof(XSaveHi16_ZMM) }, 1547 [XSTATE_PKRU_BIT] = 1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 1549 .offset = offsetof(X86XSaveArea, pkru_state), 1550 .size = sizeof(XSavePKRU) }, 1551 }; 1552 1553 static uint32_t xsave_area_size(uint64_t mask) 1554 { 1555 int i; 1556 uint64_t ret = 0; 1557 1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 1559 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 1560 if ((mask >> i) & 1) { 1561 ret = MAX(ret, esa->offset + esa->size); 1562 } 1563 } 1564 return ret; 1565 } 1566 1567 static inline bool accel_uses_host_cpuid(void) 1568 { 1569 return kvm_enabled() || hvf_enabled(); 1570 } 1571 1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 1573 { 1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 1575 cpu->env.features[FEAT_XSAVE_COMP_LO]; 1576 } 1577 1578 const char *get_register_name_32(unsigned int reg) 1579 { 1580 if (reg >= CPU_NB_REGS32) { 1581 return NULL; 1582 } 1583 return x86_reg_info_32[reg].name; 1584 } 1585 1586 /* 1587 * Returns the set of feature flags that are supported and migratable by 1588 * QEMU, for a given FeatureWord. 
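 * A flag is considered migratable if it is listed in migratable_flags, or if
 * it has a feat_names[] entry and is not listed in unmigratable_flags.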
1589 */ 1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) 1591 { 1592 FeatureWordInfo *wi = &feature_word_info[w]; 1593 uint64_t r = 0; 1594 int i; 1595 1596 for (i = 0; i < 64; i++) { 1597 uint64_t f = 1ULL << i; 1598 1599 /* If the feature name is known, it is implicitly considered migratable, 1600 * unless it is explicitly set in unmigratable_flags */ 1601 if ((wi->migratable_flags & f) || 1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1603 r |= f; 1604 } 1605 } 1606 return r; 1607 } 1608 1609 void host_cpuid(uint32_t function, uint32_t count, 1610 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1611 { 1612 uint32_t vec[4]; 1613 1614 #ifdef __x86_64__ 1615 asm volatile("cpuid" 1616 : "=a"(vec[0]), "=b"(vec[1]), 1617 "=c"(vec[2]), "=d"(vec[3]) 1618 : "0"(function), "c"(count) : "cc"); 1619 #elif defined(__i386__) 1620 asm volatile("pusha \n\t" 1621 "cpuid \n\t" 1622 "mov %%eax, 0(%2) \n\t" 1623 "mov %%ebx, 4(%2) \n\t" 1624 "mov %%ecx, 8(%2) \n\t" 1625 "mov %%edx, 12(%2) \n\t" 1626 "popa" 1627 : : "a"(function), "c"(count), "S"(vec) 1628 : "memory", "cc"); 1629 #else 1630 abort(); 1631 #endif 1632 1633 if (eax) 1634 *eax = vec[0]; 1635 if (ebx) 1636 *ebx = vec[1]; 1637 if (ecx) 1638 *ecx = vec[2]; 1639 if (edx) 1640 *edx = vec[3]; 1641 } 1642 1643 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1644 { 1645 uint32_t eax, ebx, ecx, edx; 1646 1647 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1648 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1649 1650 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1651 if (family) { 1652 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1653 } 1654 if (model) { 1655 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1656 } 1657 if (stepping) { 1658 *stepping = eax & 0x0F; 1659 } 1660 } 1661 1662 /* CPU class name definitions: */ 1663 1664 /* Return type name for a given CPU model name 1665 * Caller is responsible for freeing the returned string. 1666 */ 1667 static char *x86_cpu_type_name(const char *model_name) 1668 { 1669 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1670 } 1671 1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1673 { 1674 ObjectClass *oc; 1675 char *typename = x86_cpu_type_name(cpu_model); 1676 oc = object_class_by_name(typename); 1677 g_free(typename); 1678 return oc; 1679 } 1680 1681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1682 { 1683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1685 return g_strndup(class_name, 1686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1687 } 1688 1689 typedef struct PropValue { 1690 const char *prop, *value; 1691 } PropValue; 1692 1693 typedef struct X86CPUVersionDefinition { 1694 X86CPUVersion version; 1695 const char *alias; 1696 PropValue *props; 1697 } X86CPUVersionDefinition; 1698 1699 /* Base definition for a CPU model */ 1700 typedef struct X86CPUDefinition { 1701 const char *name; 1702 uint32_t level; 1703 uint32_t xlevel; 1704 /* vendor is zero-terminated, 12 character ASCII string */ 1705 char vendor[CPUID_VENDOR_SZ + 1]; 1706 int family; 1707 int model; 1708 int stepping; 1709 FeatureWordArray features; 1710 const char *model_id; 1711 CPUCaches *cache_info; 1712 /* 1713 * Definitions for alternative versions of CPU model. 1714 * List is terminated by item with version == 0. 1715 * If NULL, version 1 will be registered automatically. 
1716 */ 1717 const X86CPUVersionDefinition *versions; 1718 } X86CPUDefinition; 1719 1720 /* Reference to a specific CPU model version */ 1721 struct X86CPUModel { 1722 /* Base CPU definition */ 1723 X86CPUDefinition *cpudef; 1724 /* CPU model version */ 1725 X86CPUVersion version; 1726 /* 1727 * If true, this is an alias CPU model. 1728 * This matters only for "-cpu help" and query-cpu-definitions 1729 */ 1730 bool is_alias; 1731 }; 1732 1733 /* Get full model name for CPU version */ 1734 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1735 X86CPUVersion version) 1736 { 1737 assert(version > 0); 1738 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1739 } 1740 1741 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1742 { 1743 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1744 static const X86CPUVersionDefinition default_version_list[] = { 1745 { 1 }, 1746 { /* end of list */ } 1747 }; 1748 1749 return def->versions ?: default_version_list; 1750 } 1751 1752 static CPUCaches epyc_cache_info = { 1753 .l1d_cache = &(CPUCacheInfo) { 1754 .type = DATA_CACHE, 1755 .level = 1, 1756 .size = 32 * KiB, 1757 .line_size = 64, 1758 .associativity = 8, 1759 .partitions = 1, 1760 .sets = 64, 1761 .lines_per_tag = 1, 1762 .self_init = 1, 1763 .no_invd_sharing = true, 1764 }, 1765 .l1i_cache = &(CPUCacheInfo) { 1766 .type = INSTRUCTION_CACHE, 1767 .level = 1, 1768 .size = 64 * KiB, 1769 .line_size = 64, 1770 .associativity = 4, 1771 .partitions = 1, 1772 .sets = 256, 1773 .lines_per_tag = 1, 1774 .self_init = 1, 1775 .no_invd_sharing = true, 1776 }, 1777 .l2_cache = &(CPUCacheInfo) { 1778 .type = UNIFIED_CACHE, 1779 .level = 2, 1780 .size = 512 * KiB, 1781 .line_size = 64, 1782 .associativity = 8, 1783 .partitions = 1, 1784 .sets = 1024, 1785 .lines_per_tag = 1, 1786 }, 1787 .l3_cache = &(CPUCacheInfo) { 1788 .type = UNIFIED_CACHE, 1789 .level = 3, 1790 .size = 8 * MiB, 1791 .line_size = 64, 1792 .associativity = 16, 1793 .partitions = 1, 1794 .sets = 8192, 1795 .lines_per_tag = 1, 1796 .self_init = true, 1797 .inclusive = true, 1798 .complex_indexing = true, 1799 }, 1800 }; 1801 1802 /* The following VMX features are not supported by KVM and are left out in the 1803 * CPU definitions: 1804 * 1805 * Dual-monitor support (all processors) 1806 * Entry to SMM 1807 * Deactivate dual-monitor treatment 1808 * Number of CR3-target values 1809 * Shutdown activity state 1810 * Wait-for-SIPI activity state 1811 * PAUSE-loop exiting (Westmere and newer) 1812 * EPT-violation #VE (Broadwell and newer) 1813 * Inject event with insn length=0 (Skylake and newer) 1814 * Conceal non-root operation from PT 1815 * Conceal VM exits from PT 1816 * Conceal VM entries from PT 1817 * Enable ENCLS exiting 1818 * Mode-based execute control (XS/XU) 1819 * TSC scaling (Skylake Server and newer) 1820 * GPA translation for PT (IceLake and newer) 1821 * User wait and pause 1822 * ENCLV exiting 1823 * Load IA32_RTIT_CTL 1824 * Clear IA32_RTIT_CTL 1825 * Advanced VM-exit information for EPT violations 1826 * Sub-page write permissions 1827 * PT in VMX operation 1828 */ 1829 1830 static X86CPUDefinition builtin_x86_defs[] = { 1831 { 1832 .name = "qemu64", 1833 .level = 0xd, 1834 .vendor = CPUID_VENDOR_AMD, 1835 .family = 6, 1836 .model = 6, 1837 .stepping = 3, 1838 .features[FEAT_1_EDX] = 1839 PPRO_FEATURES | 1840 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1841 CPUID_PSE36, 1842 .features[FEAT_1_ECX] = 1843 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1844
.features[FEAT_8000_0001_EDX] = 1845 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1846 .features[FEAT_8000_0001_ECX] = 1847 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1848 .xlevel = 0x8000000A, 1849 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1850 }, 1851 { 1852 .name = "phenom", 1853 .level = 5, 1854 .vendor = CPUID_VENDOR_AMD, 1855 .family = 16, 1856 .model = 2, 1857 .stepping = 3, 1858 /* Missing: CPUID_HT */ 1859 .features[FEAT_1_EDX] = 1860 PPRO_FEATURES | 1861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1862 CPUID_PSE36 | CPUID_VME, 1863 .features[FEAT_1_ECX] = 1864 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1865 CPUID_EXT_POPCNT, 1866 .features[FEAT_8000_0001_EDX] = 1867 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1868 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1869 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1870 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1871 CPUID_EXT3_CR8LEG, 1872 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1873 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1874 .features[FEAT_8000_0001_ECX] = 1875 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1876 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1877 /* Missing: CPUID_SVM_LBRV */ 1878 .features[FEAT_SVM] = 1879 CPUID_SVM_NPT, 1880 .xlevel = 0x8000001A, 1881 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1882 }, 1883 { 1884 .name = "core2duo", 1885 .level = 10, 1886 .vendor = CPUID_VENDOR_INTEL, 1887 .family = 6, 1888 .model = 15, 1889 .stepping = 11, 1890 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1891 .features[FEAT_1_EDX] = 1892 PPRO_FEATURES | 1893 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1894 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1895 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1896 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1897 .features[FEAT_1_ECX] = 1898 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1899 CPUID_EXT_CX16, 1900 .features[FEAT_8000_0001_EDX] = 1901 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1902 .features[FEAT_8000_0001_ECX] = 1903 CPUID_EXT3_LAHF_LM, 1904 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1905 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1906 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1907 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1908 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1909 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1910 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1911 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1912 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1913 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1914 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1915 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1916 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1917 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1918 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1919 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1920 .features[FEAT_VMX_SECONDARY_CTLS] = 1921 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1922 .xlevel = 0x80000008, 1923 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1924 }, 1925 { 1926 .name = "kvm64", 1927 .level = 0xd, 1928 .vendor = CPUID_VENDOR_INTEL, 1929 .family = 15, 1930 .model = 6, 1931 .stepping = 1, 1932 /* Missing: CPUID_HT */ 1933 .features[FEAT_1_EDX] 
= 1934 PPRO_FEATURES | CPUID_VME | 1935 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1936 CPUID_PSE36, 1937 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1938 .features[FEAT_1_ECX] = 1939 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1940 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1941 .features[FEAT_8000_0001_EDX] = 1942 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1943 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1944 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1945 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1946 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1947 .features[FEAT_8000_0001_ECX] = 1948 0, 1949 /* VMX features from Cedar Mill/Prescott */ 1950 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1951 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1952 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1953 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1954 VMX_PIN_BASED_NMI_EXITING, 1955 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1956 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1957 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1958 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1959 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1960 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1961 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1962 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1963 .xlevel = 0x80000008, 1964 .model_id = "Common KVM processor" 1965 }, 1966 { 1967 .name = "qemu32", 1968 .level = 4, 1969 .vendor = CPUID_VENDOR_INTEL, 1970 .family = 6, 1971 .model = 6, 1972 .stepping = 3, 1973 .features[FEAT_1_EDX] = 1974 PPRO_FEATURES, 1975 .features[FEAT_1_ECX] = 1976 CPUID_EXT_SSE3, 1977 .xlevel = 0x80000004, 1978 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1979 }, 1980 { 1981 .name = "kvm32", 1982 .level = 5, 1983 .vendor = CPUID_VENDOR_INTEL, 1984 .family = 15, 1985 .model = 6, 1986 .stepping = 1, 1987 .features[FEAT_1_EDX] = 1988 PPRO_FEATURES | CPUID_VME | 1989 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1990 .features[FEAT_1_ECX] = 1991 CPUID_EXT_SSE3, 1992 .features[FEAT_8000_0001_ECX] = 1993 0, 1994 /* VMX features from Yonah */ 1995 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1996 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1997 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1998 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1999 VMX_PIN_BASED_NMI_EXITING, 2000 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2001 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2002 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2003 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2004 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2005 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2006 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2007 .xlevel = 0x80000008, 2008 .model_id = "Common 32-bit KVM processor" 2009 }, 2010 { 2011 .name = "coreduo", 2012 .level = 10, 2013 .vendor = CPUID_VENDOR_INTEL, 2014 .family = 6, 2015 .model = 14, 2016 .stepping = 8, 2017 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2018 .features[FEAT_1_EDX] = 2019 PPRO_FEATURES | CPUID_VME | 2020 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2021 CPUID_SS, 2022 /* 
Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2023 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2024 .features[FEAT_1_ECX] = 2025 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2026 .features[FEAT_8000_0001_EDX] = 2027 CPUID_EXT2_NX, 2028 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2029 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2030 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2031 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2032 VMX_PIN_BASED_NMI_EXITING, 2033 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2034 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2035 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2036 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2037 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2038 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2039 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2040 .xlevel = 0x80000008, 2041 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2042 }, 2043 { 2044 .name = "486", 2045 .level = 1, 2046 .vendor = CPUID_VENDOR_INTEL, 2047 .family = 4, 2048 .model = 8, 2049 .stepping = 0, 2050 .features[FEAT_1_EDX] = 2051 I486_FEATURES, 2052 .xlevel = 0, 2053 .model_id = "", 2054 }, 2055 { 2056 .name = "pentium", 2057 .level = 1, 2058 .vendor = CPUID_VENDOR_INTEL, 2059 .family = 5, 2060 .model = 4, 2061 .stepping = 3, 2062 .features[FEAT_1_EDX] = 2063 PENTIUM_FEATURES, 2064 .xlevel = 0, 2065 .model_id = "", 2066 }, 2067 { 2068 .name = "pentium2", 2069 .level = 2, 2070 .vendor = CPUID_VENDOR_INTEL, 2071 .family = 6, 2072 .model = 5, 2073 .stepping = 2, 2074 .features[FEAT_1_EDX] = 2075 PENTIUM2_FEATURES, 2076 .xlevel = 0, 2077 .model_id = "", 2078 }, 2079 { 2080 .name = "pentium3", 2081 .level = 3, 2082 .vendor = CPUID_VENDOR_INTEL, 2083 .family = 6, 2084 .model = 7, 2085 .stepping = 3, 2086 .features[FEAT_1_EDX] = 2087 PENTIUM3_FEATURES, 2088 .xlevel = 0, 2089 .model_id = "", 2090 }, 2091 { 2092 .name = "athlon", 2093 .level = 2, 2094 .vendor = CPUID_VENDOR_AMD, 2095 .family = 6, 2096 .model = 2, 2097 .stepping = 3, 2098 .features[FEAT_1_EDX] = 2099 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2100 CPUID_MCA, 2101 .features[FEAT_8000_0001_EDX] = 2102 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2103 .xlevel = 0x80000008, 2104 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2105 }, 2106 { 2107 .name = "n270", 2108 .level = 10, 2109 .vendor = CPUID_VENDOR_INTEL, 2110 .family = 6, 2111 .model = 28, 2112 .stepping = 2, 2113 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2114 .features[FEAT_1_EDX] = 2115 PPRO_FEATURES | 2116 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2117 CPUID_ACPI | CPUID_SS, 2118 /* Some CPUs got no CPUID_SEP */ 2119 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2120 * CPUID_EXT_XTPR */ 2121 .features[FEAT_1_ECX] = 2122 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2123 CPUID_EXT_MOVBE, 2124 .features[FEAT_8000_0001_EDX] = 2125 CPUID_EXT2_NX, 2126 .features[FEAT_8000_0001_ECX] = 2127 CPUID_EXT3_LAHF_LM, 2128 .xlevel = 0x80000008, 2129 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2130 }, 2131 { 2132 .name = "Conroe", 2133 .level = 10, 2134 .vendor = CPUID_VENDOR_INTEL, 2135 .family = 6, 2136 .model = 15, 2137 .stepping = 3, 2138 .features[FEAT_1_EDX] = 2139 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2140 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 
CPUID_MCA | 2141 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2142 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2143 CPUID_DE | CPUID_FP87, 2144 .features[FEAT_1_ECX] = 2145 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2146 .features[FEAT_8000_0001_EDX] = 2147 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2148 .features[FEAT_8000_0001_ECX] = 2149 CPUID_EXT3_LAHF_LM, 2150 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2151 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2152 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2153 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2154 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2155 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2156 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2157 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2158 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2159 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2160 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2161 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2162 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2163 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2164 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2165 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2166 .features[FEAT_VMX_SECONDARY_CTLS] = 2167 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2168 .xlevel = 0x80000008, 2169 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2170 }, 2171 { 2172 .name = "Penryn", 2173 .level = 10, 2174 .vendor = CPUID_VENDOR_INTEL, 2175 .family = 6, 2176 .model = 23, 2177 .stepping = 3, 2178 .features[FEAT_1_EDX] = 2179 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2180 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2181 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2182 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2183 CPUID_DE | CPUID_FP87, 2184 .features[FEAT_1_ECX] = 2185 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2186 CPUID_EXT_SSE3, 2187 .features[FEAT_8000_0001_EDX] = 2188 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2189 .features[FEAT_8000_0001_ECX] = 2190 CPUID_EXT3_LAHF_LM, 2191 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2192 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2193 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2194 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2195 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2196 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2197 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2198 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2199 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2200 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2201 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2202 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2203 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2204 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2205 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2206 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2207 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2208 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2209 .features[FEAT_VMX_SECONDARY_CTLS] = 
2210 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2211 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2212 .xlevel = 0x80000008, 2213 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2214 }, 2215 { 2216 .name = "Nehalem", 2217 .level = 11, 2218 .vendor = CPUID_VENDOR_INTEL, 2219 .family = 6, 2220 .model = 26, 2221 .stepping = 3, 2222 .features[FEAT_1_EDX] = 2223 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2224 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2225 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2226 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2227 CPUID_DE | CPUID_FP87, 2228 .features[FEAT_1_ECX] = 2229 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2230 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2231 .features[FEAT_8000_0001_EDX] = 2232 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2233 .features[FEAT_8000_0001_ECX] = 2234 CPUID_EXT3_LAHF_LM, 2235 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2236 MSR_VMX_BASIC_TRUE_CTLS, 2237 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2238 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2239 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2240 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2241 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2242 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2243 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2244 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2245 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2246 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2247 .features[FEAT_VMX_EXIT_CTLS] = 2248 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2249 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2250 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2251 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2252 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2253 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2254 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2255 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2256 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2257 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2258 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2259 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2260 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2261 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2262 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2263 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2264 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2265 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2266 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2267 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2268 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2269 .features[FEAT_VMX_SECONDARY_CTLS] = 2270 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2271 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2272 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2273 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2274 VMX_SECONDARY_EXEC_ENABLE_VPID, 2275 .xlevel = 0x80000008, 2276 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2277 .versions = (X86CPUVersionDefinition[]) { 2278 { .version = 1 }, 2279 { 2280 .version = 2, 2281 .alias = 
"Nehalem-IBRS", 2282 .props = (PropValue[]) { 2283 { "spec-ctrl", "on" }, 2284 { "model-id", 2285 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2286 { /* end of list */ } 2287 } 2288 }, 2289 { /* end of list */ } 2290 } 2291 }, 2292 { 2293 .name = "Westmere", 2294 .level = 11, 2295 .vendor = CPUID_VENDOR_INTEL, 2296 .family = 6, 2297 .model = 44, 2298 .stepping = 1, 2299 .features[FEAT_1_EDX] = 2300 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2301 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2302 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2303 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2304 CPUID_DE | CPUID_FP87, 2305 .features[FEAT_1_ECX] = 2306 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2307 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2308 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2309 .features[FEAT_8000_0001_EDX] = 2310 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2311 .features[FEAT_8000_0001_ECX] = 2312 CPUID_EXT3_LAHF_LM, 2313 .features[FEAT_6_EAX] = 2314 CPUID_6_EAX_ARAT, 2315 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2316 MSR_VMX_BASIC_TRUE_CTLS, 2317 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2318 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2319 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2320 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2321 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2322 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2323 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2324 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2325 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2326 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2327 .features[FEAT_VMX_EXIT_CTLS] = 2328 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2329 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2330 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2331 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2332 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2333 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2334 MSR_VMX_MISC_STORE_LMA, 2335 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2336 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2337 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2338 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2339 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2340 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2341 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2342 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2343 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2344 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2345 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2346 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2347 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2348 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2349 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2350 .features[FEAT_VMX_SECONDARY_CTLS] = 2351 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2352 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2353 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2354 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2355 VMX_SECONDARY_EXEC_ENABLE_VPID | 
VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2356 .xlevel = 0x80000008, 2357 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2358 .versions = (X86CPUVersionDefinition[]) { 2359 { .version = 1 }, 2360 { 2361 .version = 2, 2362 .alias = "Westmere-IBRS", 2363 .props = (PropValue[]) { 2364 { "spec-ctrl", "on" }, 2365 { "model-id", 2366 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2367 { /* end of list */ } 2368 } 2369 }, 2370 { /* end of list */ } 2371 } 2372 }, 2373 { 2374 .name = "SandyBridge", 2375 .level = 0xd, 2376 .vendor = CPUID_VENDOR_INTEL, 2377 .family = 6, 2378 .model = 42, 2379 .stepping = 1, 2380 .features[FEAT_1_EDX] = 2381 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2382 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2383 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2384 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2385 CPUID_DE | CPUID_FP87, 2386 .features[FEAT_1_ECX] = 2387 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2388 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2389 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2390 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2391 CPUID_EXT_SSE3, 2392 .features[FEAT_8000_0001_EDX] = 2393 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2394 CPUID_EXT2_SYSCALL, 2395 .features[FEAT_8000_0001_ECX] = 2396 CPUID_EXT3_LAHF_LM, 2397 .features[FEAT_XSAVE] = 2398 CPUID_XSAVE_XSAVEOPT, 2399 .features[FEAT_6_EAX] = 2400 CPUID_6_EAX_ARAT, 2401 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2402 MSR_VMX_BASIC_TRUE_CTLS, 2403 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2404 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2405 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2406 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2407 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2408 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2409 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2410 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2411 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2412 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2413 .features[FEAT_VMX_EXIT_CTLS] = 2414 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2415 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2416 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2417 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2418 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2419 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2420 MSR_VMX_MISC_STORE_LMA, 2421 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2422 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2423 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2424 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2425 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2426 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2427 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2428 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2429 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2430 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2431 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2432 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2433 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2434 
VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2435 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2436 .features[FEAT_VMX_SECONDARY_CTLS] = 2437 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2438 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2439 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2440 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2441 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2442 .xlevel = 0x80000008, 2443 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2444 .versions = (X86CPUVersionDefinition[]) { 2445 { .version = 1 }, 2446 { 2447 .version = 2, 2448 .alias = "SandyBridge-IBRS", 2449 .props = (PropValue[]) { 2450 { "spec-ctrl", "on" }, 2451 { "model-id", 2452 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2453 { /* end of list */ } 2454 } 2455 }, 2456 { /* end of list */ } 2457 } 2458 }, 2459 { 2460 .name = "IvyBridge", 2461 .level = 0xd, 2462 .vendor = CPUID_VENDOR_INTEL, 2463 .family = 6, 2464 .model = 58, 2465 .stepping = 9, 2466 .features[FEAT_1_EDX] = 2467 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2468 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2469 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2470 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2471 CPUID_DE | CPUID_FP87, 2472 .features[FEAT_1_ECX] = 2473 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2474 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2475 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2476 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2477 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2478 .features[FEAT_7_0_EBX] = 2479 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2480 CPUID_7_0_EBX_ERMS, 2481 .features[FEAT_8000_0001_EDX] = 2482 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2483 CPUID_EXT2_SYSCALL, 2484 .features[FEAT_8000_0001_ECX] = 2485 CPUID_EXT3_LAHF_LM, 2486 .features[FEAT_XSAVE] = 2487 CPUID_XSAVE_XSAVEOPT, 2488 .features[FEAT_6_EAX] = 2489 CPUID_6_EAX_ARAT, 2490 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2491 MSR_VMX_BASIC_TRUE_CTLS, 2492 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2493 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2494 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2495 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2496 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2497 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2498 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2499 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2500 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2501 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2502 .features[FEAT_VMX_EXIT_CTLS] = 2503 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2504 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2505 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2506 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2507 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2508 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2509 MSR_VMX_MISC_STORE_LMA, 2510 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2511 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2512 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2513 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2514 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2515 
VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2516 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2517 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2518 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2519 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2520 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2521 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2522 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2523 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2524 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2525 .features[FEAT_VMX_SECONDARY_CTLS] = 2526 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2527 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2528 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2529 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2530 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2531 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2532 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2533 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2534 .xlevel = 0x80000008, 2535 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2536 .versions = (X86CPUVersionDefinition[]) { 2537 { .version = 1 }, 2538 { 2539 .version = 2, 2540 .alias = "IvyBridge-IBRS", 2541 .props = (PropValue[]) { 2542 { "spec-ctrl", "on" }, 2543 { "model-id", 2544 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2545 { /* end of list */ } 2546 } 2547 }, 2548 { /* end of list */ } 2549 } 2550 }, 2551 { 2552 .name = "Haswell", 2553 .level = 0xd, 2554 .vendor = CPUID_VENDOR_INTEL, 2555 .family = 6, 2556 .model = 60, 2557 .stepping = 4, 2558 .features[FEAT_1_EDX] = 2559 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2560 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2561 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2562 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2563 CPUID_DE | CPUID_FP87, 2564 .features[FEAT_1_ECX] = 2565 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2566 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2567 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2568 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2569 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2570 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2571 .features[FEAT_8000_0001_EDX] = 2572 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2573 CPUID_EXT2_SYSCALL, 2574 .features[FEAT_8000_0001_ECX] = 2575 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2576 .features[FEAT_7_0_EBX] = 2577 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2578 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2579 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2580 CPUID_7_0_EBX_RTM, 2581 .features[FEAT_XSAVE] = 2582 CPUID_XSAVE_XSAVEOPT, 2583 .features[FEAT_6_EAX] = 2584 CPUID_6_EAX_ARAT, 2585 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2586 MSR_VMX_BASIC_TRUE_CTLS, 2587 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2588 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2589 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2590 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2591 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2592 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2593 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2594 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2595 
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2596 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2597 .features[FEAT_VMX_EXIT_CTLS] = 2598 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2599 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2600 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2601 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2602 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2603 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2604 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2605 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2606 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2607 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2608 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2609 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2610 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2611 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2612 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2613 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2614 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2615 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2616 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2617 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2618 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2619 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2620 .features[FEAT_VMX_SECONDARY_CTLS] = 2621 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2622 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2623 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2624 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2625 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2626 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2627 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2628 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2629 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2630 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2631 .xlevel = 0x80000008, 2632 .model_id = "Intel Core Processor (Haswell)", 2633 .versions = (X86CPUVersionDefinition[]) { 2634 { .version = 1 }, 2635 { 2636 .version = 2, 2637 .alias = "Haswell-noTSX", 2638 .props = (PropValue[]) { 2639 { "hle", "off" }, 2640 { "rtm", "off" }, 2641 { "stepping", "1" }, 2642 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2643 { /* end of list */ } 2644 }, 2645 }, 2646 { 2647 .version = 3, 2648 .alias = "Haswell-IBRS", 2649 .props = (PropValue[]) { 2650 /* Restore TSX features removed by -v2 above */ 2651 { "hle", "on" }, 2652 { "rtm", "on" }, 2653 /* 2654 * Haswell and Haswell-IBRS had stepping=4 in 2655 * QEMU 4.0 and older 2656 */ 2657 { "stepping", "4" }, 2658 { "spec-ctrl", "on" }, 2659 { "model-id", 2660 "Intel Core Processor (Haswell, IBRS)" }, 2661 { /* end of list */ } 2662 } 2663 }, 2664 { 2665 .version = 4, 2666 .alias = "Haswell-noTSX-IBRS", 2667 .props = (PropValue[]) { 2668 { "hle", "off" }, 2669 { "rtm", "off" }, 2670 /* spec-ctrl was already enabled by -v3 above */ 2671 { "stepping", "1" }, 2672 { "model-id", 2673 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2674 { /* end of list */ } 2675 } 2676 }, 2677 { /* end of list */ } 2678 } 2679 }, 2680 { 2681 .name = "Broadwell", 2682 .level = 0xd, 2683 .vendor = CPUID_VENDOR_INTEL, 2684 .family = 
6, 2685 .model = 61, 2686 .stepping = 2, 2687 .features[FEAT_1_EDX] = 2688 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2689 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2690 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2691 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2692 CPUID_DE | CPUID_FP87, 2693 .features[FEAT_1_ECX] = 2694 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2695 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2696 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2697 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2698 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2699 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2700 .features[FEAT_8000_0001_EDX] = 2701 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2702 CPUID_EXT2_SYSCALL, 2703 .features[FEAT_8000_0001_ECX] = 2704 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2705 .features[FEAT_7_0_EBX] = 2706 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2707 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2708 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2709 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2710 CPUID_7_0_EBX_SMAP, 2711 .features[FEAT_XSAVE] = 2712 CPUID_XSAVE_XSAVEOPT, 2713 .features[FEAT_6_EAX] = 2714 CPUID_6_EAX_ARAT, 2715 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2716 MSR_VMX_BASIC_TRUE_CTLS, 2717 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2718 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2719 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2720 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2721 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2722 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2723 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2724 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2725 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2726 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2727 .features[FEAT_VMX_EXIT_CTLS] = 2728 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2729 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2730 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2731 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2732 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2733 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2734 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2735 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2736 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2737 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2738 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2739 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2740 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2741 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2742 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2743 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2744 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2745 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2746 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2747 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2748 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2749 
VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2750 .features[FEAT_VMX_SECONDARY_CTLS] = 2751 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2752 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2753 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2754 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2755 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2756 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2757 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2758 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2759 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2760 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2761 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2762 .xlevel = 0x80000008, 2763 .model_id = "Intel Core Processor (Broadwell)", 2764 .versions = (X86CPUVersionDefinition[]) { 2765 { .version = 1 }, 2766 { 2767 .version = 2, 2768 .alias = "Broadwell-noTSX", 2769 .props = (PropValue[]) { 2770 { "hle", "off" }, 2771 { "rtm", "off" }, 2772 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2773 { /* end of list */ } 2774 }, 2775 }, 2776 { 2777 .version = 3, 2778 .alias = "Broadwell-IBRS", 2779 .props = (PropValue[]) { 2780 /* Restore TSX features removed by -v2 above */ 2781 { "hle", "on" }, 2782 { "rtm", "on" }, 2783 { "spec-ctrl", "on" }, 2784 { "model-id", 2785 "Intel Core Processor (Broadwell, IBRS)" }, 2786 { /* end of list */ } 2787 } 2788 }, 2789 { 2790 .version = 4, 2791 .alias = "Broadwell-noTSX-IBRS", 2792 .props = (PropValue[]) { 2793 { "hle", "off" }, 2794 { "rtm", "off" }, 2795 /* spec-ctrl was already enabled by -v3 above */ 2796 { "model-id", 2797 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2798 { /* end of list */ } 2799 } 2800 }, 2801 { /* end of list */ } 2802 } 2803 }, 2804 { 2805 .name = "Skylake-Client", 2806 .level = 0xd, 2807 .vendor = CPUID_VENDOR_INTEL, 2808 .family = 6, 2809 .model = 94, 2810 .stepping = 3, 2811 .features[FEAT_1_EDX] = 2812 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2813 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2814 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2815 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2816 CPUID_DE | CPUID_FP87, 2817 .features[FEAT_1_ECX] = 2818 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2819 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2820 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2821 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2822 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2823 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2824 .features[FEAT_8000_0001_EDX] = 2825 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2826 CPUID_EXT2_SYSCALL, 2827 .features[FEAT_8000_0001_ECX] = 2828 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2829 .features[FEAT_7_0_EBX] = 2830 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2831 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2832 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2833 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2834 CPUID_7_0_EBX_SMAP, 2835 /* Missing: XSAVES (not supported by some Linux versions, 2836 * including v4.1 to v4.12). 2837 * KVM doesn't yet expose any XSAVES state save component, 2838 * and the only one defined in Skylake (processor tracing) 2839 * probably will block migration anyway. 
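 * As a result, the FEAT_XSAVE word below advertises only XSAVEOPT, XSAVEC
 * and XGETBV1; the XSAVES bit itself is left clear.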
2840 */ 2841 .features[FEAT_XSAVE] = 2842 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2843 CPUID_XSAVE_XGETBV1, 2844 .features[FEAT_6_EAX] = 2845 CPUID_6_EAX_ARAT, 2846 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2847 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2848 MSR_VMX_BASIC_TRUE_CTLS, 2849 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2850 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2851 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2852 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2853 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2854 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2855 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2856 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2857 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2858 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2859 .features[FEAT_VMX_EXIT_CTLS] = 2860 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2861 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2862 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2863 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2864 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2865 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2866 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2867 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2868 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2869 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2870 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2871 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2872 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2873 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2874 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2875 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2876 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2877 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2878 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2879 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2880 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2881 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2882 .features[FEAT_VMX_SECONDARY_CTLS] = 2883 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2884 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2885 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2886 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2887 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2888 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2889 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2890 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2891 .xlevel = 0x80000008, 2892 .model_id = "Intel Core Processor (Skylake)", 2893 .versions = (X86CPUVersionDefinition[]) { 2894 { .version = 1 }, 2895 { 2896 .version = 2, 2897 .alias = "Skylake-Client-IBRS", 2898 .props = (PropValue[]) { 2899 { "spec-ctrl", "on" }, 2900 { "model-id", 2901 "Intel Core Processor (Skylake, IBRS)" }, 2902 { /* end of list */ } 2903 } 2904 }, 2905 { 2906 .version = 3, 2907 .alias = "Skylake-Client-noTSX-IBRS", 2908 .props = (PropValue[]) { 2909 { "hle", "off" }, 2910 { "rtm", "off" }, 2911 { /* end of list */ } 2912 } 
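/* As with Haswell-noTSX-IBRS and Broadwell-noTSX-IBRS above, spec-ctrl was
 * already enabled by -v2, so only the TSX bits need to be turned off here.
 */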
2913 }, 2914 { /* end of list */ } 2915 } 2916 }, 2917 { 2918 .name = "Skylake-Server", 2919 .level = 0xd, 2920 .vendor = CPUID_VENDOR_INTEL, 2921 .family = 6, 2922 .model = 85, 2923 .stepping = 4, 2924 .features[FEAT_1_EDX] = 2925 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2926 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2927 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2928 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2929 CPUID_DE | CPUID_FP87, 2930 .features[FEAT_1_ECX] = 2931 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2932 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2933 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2934 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2935 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2936 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2937 .features[FEAT_8000_0001_EDX] = 2938 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2939 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2940 .features[FEAT_8000_0001_ECX] = 2941 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2942 .features[FEAT_7_0_EBX] = 2943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2944 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2945 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2946 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2947 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2948 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2949 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2950 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2951 .features[FEAT_7_0_ECX] = 2952 CPUID_7_0_ECX_PKU, 2953 /* Missing: XSAVES (not supported by some Linux versions, 2954 * including v4.1 to v4.12). 2955 * KVM doesn't yet expose any XSAVES state save component, 2956 * and the only one defined in Skylake (processor tracing) 2957 * probably will block migration anyway. 
2958 */ 2959 .features[FEAT_XSAVE] = 2960 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2961 CPUID_XSAVE_XGETBV1, 2962 .features[FEAT_6_EAX] = 2963 CPUID_6_EAX_ARAT, 2964 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2965 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2966 MSR_VMX_BASIC_TRUE_CTLS, 2967 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2968 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2969 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2970 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2971 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2972 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2973 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2974 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2975 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2976 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2977 .features[FEAT_VMX_EXIT_CTLS] = 2978 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2979 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2980 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2981 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2982 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2983 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2984 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2985 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2986 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2987 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2988 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2989 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2990 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2991 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2992 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2993 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2994 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2995 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2996 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2997 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2998 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2999 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3000 .features[FEAT_VMX_SECONDARY_CTLS] = 3001 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3002 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3003 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3004 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3005 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3006 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3007 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3008 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3009 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3010 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3011 .xlevel = 0x80000008, 3012 .model_id = "Intel Xeon Processor (Skylake)", 3013 .versions = (X86CPUVersionDefinition[]) { 3014 { .version = 1 }, 3015 { 3016 .version = 2, 3017 .alias = "Skylake-Server-IBRS", 3018 .props = (PropValue[]) { 3019 /* clflushopt was not added to Skylake-Server-IBRS */ 3020 /* TODO: add -v3 including clflushopt */ 3021 { "clflushopt", "off" }, 3022 { "spec-ctrl", "on" }, 3023 { "model-id", 3024 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3025 { /* end of list */ } 3026 } 3027 }, 3028 { 3029 .version = 3, 3030 .alias = "Skylake-Server-noTSX-IBRS", 3031 .props = (PropValue[]) { 3032 { "hle", "off" }, 3033 { "rtm", "off" }, 3034 { /* end of list */ } 3035 } 3036 }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .name = "Cascadelake-Server", 3042 .level = 0xd, 3043 .vendor = CPUID_VENDOR_INTEL, 3044 .family = 6, 3045 .model = 85, 3046 .stepping = 6, 3047 .features[FEAT_1_EDX] = 3048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3052 CPUID_DE | CPUID_FP87, 3053 .features[FEAT_1_ECX] = 3054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3055 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3059 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3060 .features[FEAT_8000_0001_EDX] = 3061 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3062 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3063 .features[FEAT_8000_0001_ECX] = 3064 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3065 .features[FEAT_7_0_EBX] = 3066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3067 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3068 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3069 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3070 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3071 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3072 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3073 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3074 .features[FEAT_7_0_ECX] = 3075 CPUID_7_0_ECX_PKU | 3076 CPUID_7_0_ECX_AVX512VNNI, 3077 .features[FEAT_7_0_EDX] = 3078 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3079 /* Missing: XSAVES (not supported by some Linux versions, 3080 * including v4.1 to v4.12). 3081 * KVM doesn't yet expose any XSAVES state save component, 3082 * and the only one defined in Skylake (processor tracing) 3083 * probably will block migration anyway. 
3084 */ 3085 .features[FEAT_XSAVE] = 3086 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3087 CPUID_XSAVE_XGETBV1, 3088 .features[FEAT_6_EAX] = 3089 CPUID_6_EAX_ARAT, 3090 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3091 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3092 MSR_VMX_BASIC_TRUE_CTLS, 3093 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3094 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3095 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3096 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3097 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3098 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3099 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3100 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3101 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3102 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3103 .features[FEAT_VMX_EXIT_CTLS] = 3104 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3105 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3106 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3107 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3108 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3109 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3110 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3111 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3112 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3113 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3114 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3115 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3116 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3117 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3118 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3119 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3120 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3121 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3122 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3123 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3124 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3125 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3126 .features[FEAT_VMX_SECONDARY_CTLS] = 3127 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3128 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3129 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3130 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3131 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3132 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3133 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3134 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3135 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3136 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3137 .xlevel = 0x80000008, 3138 .model_id = "Intel Xeon Processor (Cascadelake)", 3139 .versions = (X86CPUVersionDefinition[]) { 3140 { .version = 1 }, 3141 { .version = 2, 3142 .props = (PropValue[]) { 3143 { "arch-capabilities", "on" }, 3144 { "rdctl-no", "on" }, 3145 { "ibrs-all", "on" }, 3146 { "skip-l1dfl-vmentry", "on" }, 3147 { "mds-no", "on" }, 3148 { /* end of list */ } 3149 }, 3150 }, 3151 { .version = 3, 3152 .alias = 
"Cascadelake-Server-noTSX", 3153 .props = (PropValue[]) { 3154 { "hle", "off" }, 3155 { "rtm", "off" }, 3156 { /* end of list */ } 3157 }, 3158 }, 3159 { /* end of list */ } 3160 } 3161 }, 3162 { 3163 .name = "Icelake-Client", 3164 .level = 0xd, 3165 .vendor = CPUID_VENDOR_INTEL, 3166 .family = 6, 3167 .model = 126, 3168 .stepping = 0, 3169 .features[FEAT_1_EDX] = 3170 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3171 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3172 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3173 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3174 CPUID_DE | CPUID_FP87, 3175 .features[FEAT_1_ECX] = 3176 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3177 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3178 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3179 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3180 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3181 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3182 .features[FEAT_8000_0001_EDX] = 3183 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3184 CPUID_EXT2_SYSCALL, 3185 .features[FEAT_8000_0001_ECX] = 3186 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3187 .features[FEAT_8000_0008_EBX] = 3188 CPUID_8000_0008_EBX_WBNOINVD, 3189 .features[FEAT_7_0_EBX] = 3190 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3191 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3192 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3193 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3194 CPUID_7_0_EBX_SMAP, 3195 .features[FEAT_7_0_ECX] = 3196 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3197 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3198 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3199 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3200 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3201 .features[FEAT_7_0_EDX] = 3202 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3203 /* Missing: XSAVES (not supported by some Linux versions, 3204 * including v4.1 to v4.12). 3205 * KVM doesn't yet expose any XSAVES state save component, 3206 * and the only one defined in Skylake (processor tracing) 3207 * probably will block migration anyway. 
3208 */ 3209 .features[FEAT_XSAVE] = 3210 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3211 CPUID_XSAVE_XGETBV1, 3212 .features[FEAT_6_EAX] = 3213 CPUID_6_EAX_ARAT, 3214 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3215 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3216 MSR_VMX_BASIC_TRUE_CTLS, 3217 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3218 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3219 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3220 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3221 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3222 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3223 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3224 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3225 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3226 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3227 .features[FEAT_VMX_EXIT_CTLS] = 3228 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3229 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3230 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3231 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3232 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3233 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3234 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3235 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3236 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3237 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3238 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3239 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3240 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3241 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3242 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3243 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3244 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3245 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3246 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3247 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3248 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3249 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3250 .features[FEAT_VMX_SECONDARY_CTLS] = 3251 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3252 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3253 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3254 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3255 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3256 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3257 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3258 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3259 .xlevel = 0x80000008, 3260 .model_id = "Intel Core Processor (Icelake)", 3261 .versions = (X86CPUVersionDefinition[]) { 3262 { .version = 1 }, 3263 { 3264 .version = 2, 3265 .alias = "Icelake-Client-noTSX", 3266 .props = (PropValue[]) { 3267 { "hle", "off" }, 3268 { "rtm", "off" }, 3269 { /* end of list */ } 3270 }, 3271 }, 3272 { /* end of list */ } 3273 } 3274 }, 3275 { 3276 .name = "Icelake-Server", 3277 .level = 0xd, 3278 .vendor = CPUID_VENDOR_INTEL, 3279 .family = 6, 3280 .model = 134, 3281 .stepping = 0, 3282 .features[FEAT_1_EDX] = 3283 
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3284 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3285 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3286 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3287 CPUID_DE | CPUID_FP87, 3288 .features[FEAT_1_ECX] = 3289 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3290 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3291 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3292 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3293 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3294 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3295 .features[FEAT_8000_0001_EDX] = 3296 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3297 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3298 .features[FEAT_8000_0001_ECX] = 3299 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3300 .features[FEAT_8000_0008_EBX] = 3301 CPUID_8000_0008_EBX_WBNOINVD, 3302 .features[FEAT_7_0_EBX] = 3303 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3304 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3305 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3306 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3307 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3308 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3309 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3310 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3311 .features[FEAT_7_0_ECX] = 3312 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3313 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3314 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3315 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3316 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3317 .features[FEAT_7_0_EDX] = 3318 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3319 /* Missing: XSAVES (not supported by some Linux versions, 3320 * including v4.1 to v4.12). 3321 * KVM doesn't yet expose any XSAVES state save component, 3322 * and the only one defined in Skylake (processor tracing) 3323 * probably will block migration anyway. 
3324 */ 3325 .features[FEAT_XSAVE] = 3326 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3327 CPUID_XSAVE_XGETBV1, 3328 .features[FEAT_6_EAX] = 3329 CPUID_6_EAX_ARAT, 3330 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3331 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3332 MSR_VMX_BASIC_TRUE_CTLS, 3333 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3334 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3335 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3336 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3337 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3338 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3339 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3340 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3341 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3342 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3343 .features[FEAT_VMX_EXIT_CTLS] = 3344 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3345 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3346 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3347 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3348 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3349 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3350 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3351 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3352 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3353 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3354 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3355 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3356 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3357 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3358 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3359 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3360 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3361 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3362 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3363 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3364 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3365 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3366 .features[FEAT_VMX_SECONDARY_CTLS] = 3367 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3368 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3369 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3370 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3371 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3372 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3373 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3374 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3375 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3376 .xlevel = 0x80000008, 3377 .model_id = "Intel Xeon Processor (Icelake)", 3378 .versions = (X86CPUVersionDefinition[]) { 3379 { .version = 1 }, 3380 { 3381 .version = 2, 3382 .alias = "Icelake-Server-noTSX", 3383 .props = (PropValue[]) { 3384 { "hle", "off" }, 3385 { "rtm", "off" }, 3386 { /* end of list */ } 3387 }, 3388 }, 3389 { /* end of list */ } 3390 } 3391 }, 3392 { 3393 .name = "Denverton", 3394 .level = 21, 3395 .vendor = CPUID_VENDOR_INTEL, 3396 .family = 6, 3397 .model = 95, 3398 .stepping = 1, 3399 
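        /*
         * Illustrative sketch (derived from the x86_cpuid_version_set_*
         * property setters later in this file): family/model/stepping are
         * packed into CPUID[1].EAX, so family 6, model 95 (0x5f) and
         * stepping 1 encode as
         *
         *     (0x5 << 16) | (6 << 8) | (0xf << 4) | 1  ==  0x000506f1
         *
         * i.e. the model's high nibble lands in the extended-model field.
         */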
.features[FEAT_1_EDX] = 3400 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3401 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3402 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3403 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3404 CPUID_SSE | CPUID_SSE2, 3405 .features[FEAT_1_ECX] = 3406 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3407 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3408 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3409 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3410 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3411 .features[FEAT_8000_0001_EDX] = 3412 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3413 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3414 .features[FEAT_8000_0001_ECX] = 3415 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3416 .features[FEAT_7_0_EBX] = 3417 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3418 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3419 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3420 .features[FEAT_7_0_EDX] = 3421 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3422 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3423 /* 3424 * Missing: XSAVES (not supported by some Linux versions, 3425 * including v4.1 to v4.12). 3426 * KVM doesn't yet expose any XSAVES state save component, 3427 * and the only one defined in Skylake (processor tracing) 3428 * probably will block migration anyway. 3429 */ 3430 .features[FEAT_XSAVE] = 3431 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3432 .features[FEAT_6_EAX] = 3433 CPUID_6_EAX_ARAT, 3434 .features[FEAT_ARCH_CAPABILITIES] = 3435 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3436 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3437 MSR_VMX_BASIC_TRUE_CTLS, 3438 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3439 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3440 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3441 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3442 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3443 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3444 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3445 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3446 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3447 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3448 .features[FEAT_VMX_EXIT_CTLS] = 3449 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3450 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3451 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3452 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3453 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3454 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3455 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3456 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3457 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3458 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3459 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3460 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3461 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3462 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3463 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3464 VMX_CPU_BASED_TPR_SHADOW | 
VMX_CPU_BASED_MOV_DR_EXITING | 3465 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3466 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3467 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3468 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3469 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3470 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3471 .features[FEAT_VMX_SECONDARY_CTLS] = 3472 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3473 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3474 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3475 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3476 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3477 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3478 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3479 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3480 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3481 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3482 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3483 .xlevel = 0x80000008, 3484 .model_id = "Intel Atom Processor (Denverton)", 3485 }, 3486 { 3487 .name = "Snowridge", 3488 .level = 27, 3489 .vendor = CPUID_VENDOR_INTEL, 3490 .family = 6, 3491 .model = 134, 3492 .stepping = 1, 3493 .features[FEAT_1_EDX] = 3494 /* missing: CPUID_PN CPUID_IA64 */ 3495 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3496 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3497 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3498 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3499 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3500 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3501 CPUID_MMX | 3502 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3503 .features[FEAT_1_ECX] = 3504 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3505 CPUID_EXT_SSSE3 | 3506 CPUID_EXT_CX16 | 3507 CPUID_EXT_SSE41 | 3508 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3509 CPUID_EXT_POPCNT | 3510 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3511 CPUID_EXT_RDRAND, 3512 .features[FEAT_8000_0001_EDX] = 3513 CPUID_EXT2_SYSCALL | 3514 CPUID_EXT2_NX | 3515 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3516 CPUID_EXT2_LM, 3517 .features[FEAT_8000_0001_ECX] = 3518 CPUID_EXT3_LAHF_LM | 3519 CPUID_EXT3_3DNOWPREFETCH, 3520 .features[FEAT_7_0_EBX] = 3521 CPUID_7_0_EBX_FSGSBASE | 3522 CPUID_7_0_EBX_SMEP | 3523 CPUID_7_0_EBX_ERMS | 3524 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3525 CPUID_7_0_EBX_RDSEED | 3526 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3527 CPUID_7_0_EBX_CLWB | 3528 CPUID_7_0_EBX_SHA_NI, 3529 .features[FEAT_7_0_ECX] = 3530 CPUID_7_0_ECX_UMIP | 3531 /* missing bit 5 */ 3532 CPUID_7_0_ECX_GFNI | 3533 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3534 CPUID_7_0_ECX_MOVDIR64B, 3535 .features[FEAT_7_0_EDX] = 3536 CPUID_7_0_EDX_SPEC_CTRL | 3537 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3538 CPUID_7_0_EDX_CORE_CAPABILITY, 3539 .features[FEAT_CORE_CAPABILITY] = 3540 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3541 /* 3542 * Missing: XSAVES (not supported by some Linux versions, 3543 * including v4.1 to v4.12). 3544 * KVM doesn't yet expose any XSAVES state save component, 3545 * and the only one defined in Skylake (processor tracing) 3546 * probably will block migration anyway. 
3547 */ 3548 .features[FEAT_XSAVE] = 3549 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3550 CPUID_XSAVE_XGETBV1, 3551 .features[FEAT_6_EAX] = 3552 CPUID_6_EAX_ARAT, 3553 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3554 MSR_VMX_BASIC_TRUE_CTLS, 3555 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3556 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3557 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3558 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3559 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3560 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3561 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3562 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3563 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3564 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3565 .features[FEAT_VMX_EXIT_CTLS] = 3566 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3567 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3568 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3569 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3570 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3571 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3572 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3573 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3574 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3575 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3576 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3577 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3578 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3579 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3580 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3581 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3582 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3583 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3584 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3585 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3586 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3587 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3588 .features[FEAT_VMX_SECONDARY_CTLS] = 3589 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3590 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3591 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3592 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3593 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3594 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3595 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3596 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3597 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3598 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3599 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3600 .xlevel = 0x80000008, 3601 .model_id = "Intel Atom Processor (SnowRidge)", 3602 .versions = (X86CPUVersionDefinition[]) { 3603 { .version = 1 }, 3604 { 3605 .version = 2, 3606 .props = (PropValue[]) { 3607 { "mpx", "off" }, 3608 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3609 { /* end of list */ }, 3610 }, 3611 }, 3612 { /* end of list */ }, 3613 }, 3614 }, 3615 { 3616 .name = "KnightsMill", 3617 .level = 0xd, 3618 .vendor = CPUID_VENDOR_INTEL, 
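        /*
         * Illustrative note, assuming the usual "GenuineIntel" value behind
         * CPUID_VENDOR_INTEL: x86_cpuid_set_vendor() later in this file
         * splits the 12-byte vendor string into three little-endian 32-bit
         * words, which CPUID[0] then returns as EBX="Genu", EDX="ineI",
         * ECX="ntel".
         */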
3619 .family = 6, 3620 .model = 133, 3621 .stepping = 0, 3622 .features[FEAT_1_EDX] = 3623 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3624 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3625 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3626 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3627 CPUID_PSE | CPUID_DE | CPUID_FP87, 3628 .features[FEAT_1_ECX] = 3629 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3630 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3631 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3632 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3633 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3634 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3635 .features[FEAT_8000_0001_EDX] = 3636 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3637 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3638 .features[FEAT_8000_0001_ECX] = 3639 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3640 .features[FEAT_7_0_EBX] = 3641 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3642 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3643 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3644 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3645 CPUID_7_0_EBX_AVX512ER, 3646 .features[FEAT_7_0_ECX] = 3647 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3648 .features[FEAT_7_0_EDX] = 3649 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3650 .features[FEAT_XSAVE] = 3651 CPUID_XSAVE_XSAVEOPT, 3652 .features[FEAT_6_EAX] = 3653 CPUID_6_EAX_ARAT, 3654 .xlevel = 0x80000008, 3655 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3656 }, 3657 { 3658 .name = "Opteron_G1", 3659 .level = 5, 3660 .vendor = CPUID_VENDOR_AMD, 3661 .family = 15, 3662 .model = 6, 3663 .stepping = 1, 3664 .features[FEAT_1_EDX] = 3665 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3666 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3667 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3668 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3669 CPUID_DE | CPUID_FP87, 3670 .features[FEAT_1_ECX] = 3671 CPUID_EXT_SSE3, 3672 .features[FEAT_8000_0001_EDX] = 3673 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3674 .xlevel = 0x80000008, 3675 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3676 }, 3677 { 3678 .name = "Opteron_G2", 3679 .level = 5, 3680 .vendor = CPUID_VENDOR_AMD, 3681 .family = 15, 3682 .model = 6, 3683 .stepping = 1, 3684 .features[FEAT_1_EDX] = 3685 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3686 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3687 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3688 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3689 CPUID_DE | CPUID_FP87, 3690 .features[FEAT_1_ECX] = 3691 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3692 .features[FEAT_8000_0001_EDX] = 3693 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3694 .features[FEAT_8000_0001_ECX] = 3695 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3696 .xlevel = 0x80000008, 3697 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3698 }, 3699 { 3700 .name = "Opteron_G3", 3701 .level = 5, 3702 .vendor = CPUID_VENDOR_AMD, 3703 .family = 16, 3704 .model = 2, 3705 .stepping = 3, 3706 .features[FEAT_1_EDX] = 3707 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3708 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3709 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3710 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3711 CPUID_DE | CPUID_FP87, 3712 .features[FEAT_1_ECX] = 3713 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3714 CPUID_EXT_SSE3, 3715 .features[FEAT_8000_0001_EDX] = 3716 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3717 CPUID_EXT2_RDTSCP, 3718 .features[FEAT_8000_0001_ECX] = 3719 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3720 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3721 .xlevel = 0x80000008, 3722 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3723 }, 3724 { 3725 .name = "Opteron_G4", 3726 .level = 0xd, 3727 .vendor = CPUID_VENDOR_AMD, 3728 .family = 21, 3729 .model = 1, 3730 .stepping = 2, 3731 .features[FEAT_1_EDX] = 3732 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3733 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3734 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3735 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3736 CPUID_DE | CPUID_FP87, 3737 .features[FEAT_1_ECX] = 3738 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3739 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3740 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3741 CPUID_EXT_SSE3, 3742 .features[FEAT_8000_0001_EDX] = 3743 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3744 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3745 .features[FEAT_8000_0001_ECX] = 3746 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3747 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3748 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3749 CPUID_EXT3_LAHF_LM, 3750 .features[FEAT_SVM] = 3751 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3752 /* no xsaveopt! */ 3753 .xlevel = 0x8000001A, 3754 .model_id = "AMD Opteron 62xx class CPU", 3755 }, 3756 { 3757 .name = "Opteron_G5", 3758 .level = 0xd, 3759 .vendor = CPUID_VENDOR_AMD, 3760 .family = 21, 3761 .model = 2, 3762 .stepping = 0, 3763 .features[FEAT_1_EDX] = 3764 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3765 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3766 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3767 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3768 CPUID_DE | CPUID_FP87, 3769 .features[FEAT_1_ECX] = 3770 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3771 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3772 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3773 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3774 .features[FEAT_8000_0001_EDX] = 3775 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3776 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3777 .features[FEAT_8000_0001_ECX] = 3778 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3779 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3780 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3781 CPUID_EXT3_LAHF_LM, 3782 .features[FEAT_SVM] = 3783 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3784 /* no xsaveopt! 
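           (Illustrative gloss: FEAT_XSAVE is left empty for this model, so
           CPUID[EAX=0xd,ECX=1].EAX advertises neither XSAVEOPT nor
           XSAVEC/XGETBV1.)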
*/ 3785 .xlevel = 0x8000001A, 3786 .model_id = "AMD Opteron 63xx class CPU", 3787 }, 3788 { 3789 .name = "EPYC", 3790 .level = 0xd, 3791 .vendor = CPUID_VENDOR_AMD, 3792 .family = 23, 3793 .model = 1, 3794 .stepping = 2, 3795 .features[FEAT_1_EDX] = 3796 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3797 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3798 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3799 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3800 CPUID_VME | CPUID_FP87, 3801 .features[FEAT_1_ECX] = 3802 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3803 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3804 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3805 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3806 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3807 .features[FEAT_8000_0001_EDX] = 3808 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3809 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3810 CPUID_EXT2_SYSCALL, 3811 .features[FEAT_8000_0001_ECX] = 3812 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3813 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3814 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3815 CPUID_EXT3_TOPOEXT, 3816 .features[FEAT_7_0_EBX] = 3817 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3818 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3819 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3820 CPUID_7_0_EBX_SHA_NI, 3821 /* Missing: XSAVES (not supported by some Linux versions, 3822 * including v4.1 to v4.12). 3823 * KVM doesn't yet expose any XSAVES state save component. 3824 */ 3825 .features[FEAT_XSAVE] = 3826 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3827 CPUID_XSAVE_XGETBV1, 3828 .features[FEAT_6_EAX] = 3829 CPUID_6_EAX_ARAT, 3830 .features[FEAT_SVM] = 3831 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3832 .xlevel = 0x8000001E, 3833 .model_id = "AMD EPYC Processor", 3834 .cache_info = &epyc_cache_info, 3835 .versions = (X86CPUVersionDefinition[]) { 3836 { .version = 1 }, 3837 { 3838 .version = 2, 3839 .alias = "EPYC-IBPB", 3840 .props = (PropValue[]) { 3841 { "ibpb", "on" }, 3842 { "model-id", 3843 "AMD EPYC Processor (with IBPB)" }, 3844 { /* end of list */ } 3845 } 3846 }, 3847 { /* end of list */ } 3848 } 3849 }, 3850 { 3851 .name = "Dhyana", 3852 .level = 0xd, 3853 .vendor = CPUID_VENDOR_HYGON, 3854 .family = 24, 3855 .model = 0, 3856 .stepping = 1, 3857 .features[FEAT_1_EDX] = 3858 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3859 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3860 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3861 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3862 CPUID_VME | CPUID_FP87, 3863 .features[FEAT_1_ECX] = 3864 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3865 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 3866 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3867 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3868 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 3869 .features[FEAT_8000_0001_EDX] = 3870 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3871 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3872 CPUID_EXT2_SYSCALL, 3873 .features[FEAT_8000_0001_ECX] = 3874 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3875 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3876 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3877 
CPUID_EXT3_TOPOEXT, 3878 .features[FEAT_8000_0008_EBX] = 3879 CPUID_8000_0008_EBX_IBPB, 3880 .features[FEAT_7_0_EBX] = 3881 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3882 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3883 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 3884 /* 3885 * Missing: XSAVES (not supported by some Linux versions, 3886 * including v4.1 to v4.12). 3887 * KVM doesn't yet expose any XSAVES state save component. 3888 */ 3889 .features[FEAT_XSAVE] = 3890 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3891 CPUID_XSAVE_XGETBV1, 3892 .features[FEAT_6_EAX] = 3893 CPUID_6_EAX_ARAT, 3894 .features[FEAT_SVM] = 3895 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3896 .xlevel = 0x8000001E, 3897 .model_id = "Hygon Dhyana Processor", 3898 .cache_info = &epyc_cache_info, 3899 }, 3900 }; 3901 3902 /* KVM-specific features that are automatically added/removed 3903 * from all CPU models when KVM is enabled. 3904 */ 3905 static PropValue kvm_default_props[] = { 3906 { "kvmclock", "on" }, 3907 { "kvm-nopiodelay", "on" }, 3908 { "kvm-asyncpf", "on" }, 3909 { "kvm-steal-time", "on" }, 3910 { "kvm-pv-eoi", "on" }, 3911 { "kvmclock-stable-bit", "on" }, 3912 { "x2apic", "on" }, 3913 { "acpi", "off" }, 3914 { "monitor", "off" }, 3915 { "svm", "off" }, 3916 { NULL, NULL }, 3917 }; 3918 3919 /* TCG-specific defaults that override all CPU models when using TCG 3920 */ 3921 static PropValue tcg_default_props[] = { 3922 { "vme", "off" }, 3923 { NULL, NULL }, 3924 }; 3925 3926 3927 X86CPUVersion default_cpu_version = CPU_VERSION_LATEST; 3928 3929 void x86_cpu_set_default_version(X86CPUVersion version) 3930 { 3931 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 3932 assert(version != CPU_VERSION_AUTO); 3933 default_cpu_version = version; 3934 } 3935 3936 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 3937 { 3938 int v = 0; 3939 const X86CPUVersionDefinition *vdef = 3940 x86_cpu_def_get_versions(model->cpudef); 3941 while (vdef->version) { 3942 v = vdef->version; 3943 vdef++; 3944 } 3945 return v; 3946 } 3947 3948 /* Return the actual version being used for a specific CPU model */ 3949 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 3950 { 3951 X86CPUVersion v = model->version; 3952 if (v == CPU_VERSION_AUTO) { 3953 v = default_cpu_version; 3954 } 3955 if (v == CPU_VERSION_LATEST) { 3956 return x86_cpu_model_last_version(model); 3957 } 3958 return v; 3959 } 3960 3961 void x86_cpu_change_kvm_default(const char *prop, const char *value) 3962 { 3963 PropValue *pv; 3964 for (pv = kvm_default_props; pv->prop; pv++) { 3965 if (!strcmp(pv->prop, prop)) { 3966 pv->value = value; 3967 break; 3968 } 3969 } 3970 3971 /* It is valid to call this function only for properties that 3972 * are already present in the kvm_default_props table. 3973 */ 3974 assert(pv->prop); 3975 } 3976 3977 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 3978 bool migratable_only); 3979 3980 static bool lmce_supported(void) 3981 { 3982 uint64_t mce_cap = 0; 3983 3984 #ifdef CONFIG_KVM 3985 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 3986 return false; 3987 } 3988 #endif 3989 3990 return !!(mce_cap & MCG_LMCE_P); 3991 } 3992 3993 #define CPUID_MODEL_ID_SZ 48 3994 3995 /** 3996 * cpu_x86_fill_model_id: 3997 * Get CPUID model ID string from host CPU. 
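 *
 * The ID is read from CPUID leaves 0x80000002..0x80000004, 16 bytes per
 * leaf. A caller that wants a NUL-terminated string is expected to pass a
 * zeroed buffer of CPUID_MODEL_ID_SZ + 1 bytes, e.g. (illustrative sketch,
 * mirroring max_x86_cpu_initfn() below):
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);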
3998 * 3999 * @str should have at least CPUID_MODEL_ID_SZ bytes 4000 * 4001 * The function does NOT add a null terminator to the string 4002 * automatically. 4003 */ 4004 static int cpu_x86_fill_model_id(char *str) 4005 { 4006 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4007 int i; 4008 4009 for (i = 0; i < 3; i++) { 4010 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4011 memcpy(str + i * 16 + 0, &eax, 4); 4012 memcpy(str + i * 16 + 4, &ebx, 4); 4013 memcpy(str + i * 16 + 8, &ecx, 4); 4014 memcpy(str + i * 16 + 12, &edx, 4); 4015 } 4016 return 0; 4017 } 4018 4019 static Property max_x86_cpu_properties[] = { 4020 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4021 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4022 DEFINE_PROP_END_OF_LIST() 4023 }; 4024 4025 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4026 { 4027 DeviceClass *dc = DEVICE_CLASS(oc); 4028 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4029 4030 xcc->ordering = 9; 4031 4032 xcc->model_description = 4033 "Enables all features supported by the accelerator in the current host"; 4034 4035 dc->props = max_x86_cpu_properties; 4036 } 4037 4038 static void max_x86_cpu_initfn(Object *obj) 4039 { 4040 X86CPU *cpu = X86_CPU(obj); 4041 CPUX86State *env = &cpu->env; 4042 KVMState *s = kvm_state; 4043 4044 /* We can't fill the features array here because we don't know yet if 4045 * "migratable" is true or false. 4046 */ 4047 cpu->max_features = true; 4048 4049 if (accel_uses_host_cpuid()) { 4050 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4051 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4052 int family, model, stepping; 4053 4054 host_vendor_fms(vendor, &family, &model, &stepping); 4055 cpu_x86_fill_model_id(model_id); 4056 4057 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 4058 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 4059 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 4060 object_property_set_int(OBJECT(cpu), stepping, "stepping", 4061 &error_abort); 4062 object_property_set_str(OBJECT(cpu), model_id, "model-id", 4063 &error_abort); 4064 4065 if (kvm_enabled()) { 4066 env->cpuid_min_level = 4067 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4068 env->cpuid_min_xlevel = 4069 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4070 env->cpuid_min_xlevel2 = 4071 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4072 } else { 4073 env->cpuid_min_level = 4074 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4075 env->cpuid_min_xlevel = 4076 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4077 env->cpuid_min_xlevel2 = 4078 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4079 } 4080 4081 if (lmce_supported()) { 4082 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 4083 } 4084 } else { 4085 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 4086 "vendor", &error_abort); 4087 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 4088 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 4089 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 4090 object_property_set_str(OBJECT(cpu), 4091 "QEMU TCG CPU version " QEMU_HW_VERSION, 4092 "model-id", &error_abort); 4093 } 4094 4095 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 4096 } 4097 4098 static const TypeInfo max_x86_cpu_type_info = { 4099 .name = X86_CPU_TYPE_NAME("max"), 4100 .parent = TYPE_X86_CPU, 4101 .instance_init = max_x86_cpu_initfn, 4102 .class_init = max_x86_cpu_class_init, 
4103 }; 4104 4105 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4106 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4107 { 4108 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4109 4110 xcc->host_cpuid_required = true; 4111 xcc->ordering = 8; 4112 4113 #if defined(CONFIG_KVM) 4114 xcc->model_description = 4115 "KVM processor with all supported host features "; 4116 #elif defined(CONFIG_HVF) 4117 xcc->model_description = 4118 "HVF processor with all supported host features "; 4119 #endif 4120 } 4121 4122 static const TypeInfo host_x86_cpu_type_info = { 4123 .name = X86_CPU_TYPE_NAME("host"), 4124 .parent = X86_CPU_TYPE_NAME("max"), 4125 .class_init = host_x86_cpu_class_init, 4126 }; 4127 4128 #endif 4129 4130 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4131 { 4132 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4133 4134 switch (f->type) { 4135 case CPUID_FEATURE_WORD: 4136 { 4137 const char *reg = get_register_name_32(f->cpuid.reg); 4138 assert(reg); 4139 return g_strdup_printf("CPUID.%02XH:%s", 4140 f->cpuid.eax, reg); 4141 } 4142 case MSR_FEATURE_WORD: 4143 return g_strdup_printf("MSR(%02XH)", 4144 f->msr.index); 4145 } 4146 4147 return NULL; 4148 } 4149 4150 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4151 { 4152 FeatureWord w; 4153 4154 for (w = 0; w < FEATURE_WORDS; w++) { 4155 if (cpu->filtered_features[w]) { 4156 return true; 4157 } 4158 } 4159 4160 return false; 4161 } 4162 4163 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4164 const char *verbose_prefix) 4165 { 4166 CPUX86State *env = &cpu->env; 4167 FeatureWordInfo *f = &feature_word_info[w]; 4168 int i; 4169 char *feat_word_str; 4170 4171 if (!cpu->force_features) { 4172 env->features[w] &= ~mask; 4173 } 4174 cpu->filtered_features[w] |= mask; 4175 4176 if (!verbose_prefix) { 4177 return; 4178 } 4179 4180 for (i = 0; i < 64; ++i) { 4181 if ((1ULL << i) & mask) { 4182 feat_word_str = feature_word_description(f, i); 4183 warn_report("%s: %s%s%s [bit %d]", 4184 verbose_prefix, 4185 feat_word_str, 4186 f->feat_names[i] ? "." : "", 4187 f->feat_names[i] ? f->feat_names[i] : "", i); 4188 g_free(feat_word_str); 4189 } 4190 } 4191 } 4192 4193 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4194 const char *name, void *opaque, 4195 Error **errp) 4196 { 4197 X86CPU *cpu = X86_CPU(obj); 4198 CPUX86State *env = &cpu->env; 4199 int64_t value; 4200 4201 value = (env->cpuid_version >> 8) & 0xf; 4202 if (value == 0xf) { 4203 value += (env->cpuid_version >> 20) & 0xff; 4204 } 4205 visit_type_int(v, name, &value, errp); 4206 } 4207 4208 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4209 const char *name, void *opaque, 4210 Error **errp) 4211 { 4212 X86CPU *cpu = X86_CPU(obj); 4213 CPUX86State *env = &cpu->env; 4214 const int64_t min = 0; 4215 const int64_t max = 0xff + 0xf; 4216 Error *local_err = NULL; 4217 int64_t value; 4218 4219 visit_type_int(v, name, &value, &local_err); 4220 if (local_err) { 4221 error_propagate(errp, local_err); 4222 return; 4223 } 4224 if (value < min || value > max) { 4225 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4226 name ? 
name : "null", value, min, max); 4227 return; 4228 } 4229 4230 env->cpuid_version &= ~0xff00f00; 4231 if (value > 0x0f) { 4232 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4233 } else { 4234 env->cpuid_version |= value << 8; 4235 } 4236 } 4237 4238 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4239 const char *name, void *opaque, 4240 Error **errp) 4241 { 4242 X86CPU *cpu = X86_CPU(obj); 4243 CPUX86State *env = &cpu->env; 4244 int64_t value; 4245 4246 value = (env->cpuid_version >> 4) & 0xf; 4247 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4248 visit_type_int(v, name, &value, errp); 4249 } 4250 4251 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4252 const char *name, void *opaque, 4253 Error **errp) 4254 { 4255 X86CPU *cpu = X86_CPU(obj); 4256 CPUX86State *env = &cpu->env; 4257 const int64_t min = 0; 4258 const int64_t max = 0xff; 4259 Error *local_err = NULL; 4260 int64_t value; 4261 4262 visit_type_int(v, name, &value, &local_err); 4263 if (local_err) { 4264 error_propagate(errp, local_err); 4265 return; 4266 } 4267 if (value < min || value > max) { 4268 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4269 name ? name : "null", value, min, max); 4270 return; 4271 } 4272 4273 env->cpuid_version &= ~0xf00f0; 4274 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4275 } 4276 4277 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4278 const char *name, void *opaque, 4279 Error **errp) 4280 { 4281 X86CPU *cpu = X86_CPU(obj); 4282 CPUX86State *env = &cpu->env; 4283 int64_t value; 4284 4285 value = env->cpuid_version & 0xf; 4286 visit_type_int(v, name, &value, errp); 4287 } 4288 4289 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4290 const char *name, void *opaque, 4291 Error **errp) 4292 { 4293 X86CPU *cpu = X86_CPU(obj); 4294 CPUX86State *env = &cpu->env; 4295 const int64_t min = 0; 4296 const int64_t max = 0xf; 4297 Error *local_err = NULL; 4298 int64_t value; 4299 4300 visit_type_int(v, name, &value, &local_err); 4301 if (local_err) { 4302 error_propagate(errp, local_err); 4303 return; 4304 } 4305 if (value < min || value > max) { 4306 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4307 name ? 
name : "null", value, min, max); 4308 return; 4309 } 4310 4311 env->cpuid_version &= ~0xf; 4312 env->cpuid_version |= value & 0xf; 4313 } 4314 4315 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4316 { 4317 X86CPU *cpu = X86_CPU(obj); 4318 CPUX86State *env = &cpu->env; 4319 char *value; 4320 4321 value = g_malloc(CPUID_VENDOR_SZ + 1); 4322 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4323 env->cpuid_vendor3); 4324 return value; 4325 } 4326 4327 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4328 Error **errp) 4329 { 4330 X86CPU *cpu = X86_CPU(obj); 4331 CPUX86State *env = &cpu->env; 4332 int i; 4333 4334 if (strlen(value) != CPUID_VENDOR_SZ) { 4335 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4336 return; 4337 } 4338 4339 env->cpuid_vendor1 = 0; 4340 env->cpuid_vendor2 = 0; 4341 env->cpuid_vendor3 = 0; 4342 for (i = 0; i < 4; i++) { 4343 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4344 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4345 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4346 } 4347 } 4348 4349 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4350 { 4351 X86CPU *cpu = X86_CPU(obj); 4352 CPUX86State *env = &cpu->env; 4353 char *value; 4354 int i; 4355 4356 value = g_malloc(48 + 1); 4357 for (i = 0; i < 48; i++) { 4358 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4359 } 4360 value[48] = '\0'; 4361 return value; 4362 } 4363 4364 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4365 Error **errp) 4366 { 4367 X86CPU *cpu = X86_CPU(obj); 4368 CPUX86State *env = &cpu->env; 4369 int c, len, i; 4370 4371 if (model_id == NULL) { 4372 model_id = ""; 4373 } 4374 len = strlen(model_id); 4375 memset(env->cpuid_model, 0, 48); 4376 for (i = 0; i < 48; i++) { 4377 if (i >= len) { 4378 c = '\0'; 4379 } else { 4380 c = (uint8_t)model_id[i]; 4381 } 4382 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4383 } 4384 } 4385 4386 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4387 void *opaque, Error **errp) 4388 { 4389 X86CPU *cpu = X86_CPU(obj); 4390 int64_t value; 4391 4392 value = cpu->env.tsc_khz * 1000; 4393 visit_type_int(v, name, &value, errp); 4394 } 4395 4396 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4397 void *opaque, Error **errp) 4398 { 4399 X86CPU *cpu = X86_CPU(obj); 4400 const int64_t min = 0; 4401 const int64_t max = INT64_MAX; 4402 Error *local_err = NULL; 4403 int64_t value; 4404 4405 visit_type_int(v, name, &value, &local_err); 4406 if (local_err) { 4407 error_propagate(errp, local_err); 4408 return; 4409 } 4410 if (value < min || value > max) { 4411 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4412 name ? name : "null", value, min, max); 4413 return; 4414 } 4415 4416 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4417 } 4418 4419 /* Generic getter for "feature-words" and "filtered-features" properties */ 4420 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4421 const char *name, void *opaque, 4422 Error **errp) 4423 { 4424 uint64_t *array = (uint64_t *)opaque; 4425 FeatureWord w; 4426 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4427 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4428 X86CPUFeatureWordInfoList *list = NULL; 4429 4430 for (w = 0; w < FEATURE_WORDS; w++) { 4431 FeatureWordInfo *wi = &feature_word_info[w]; 4432 /* 4433 * We didn't have MSR features when "feature-words" was 4434 * introduced. 
Therefore skipped other type entries. 4435 */ 4436 if (wi->type != CPUID_FEATURE_WORD) { 4437 continue; 4438 } 4439 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4440 qwi->cpuid_input_eax = wi->cpuid.eax; 4441 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4442 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4443 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4444 qwi->features = array[w]; 4445 4446 /* List will be in reverse order, but order shouldn't matter */ 4447 list_entries[w].next = list; 4448 list_entries[w].value = &word_infos[w]; 4449 list = &list_entries[w]; 4450 } 4451 4452 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4453 } 4454 4455 /* Convert all '_' in a feature string option name to '-', to make feature 4456 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4457 */ 4458 static inline void feat2prop(char *s) 4459 { 4460 while ((s = strchr(s, '_'))) { 4461 *s = '-'; 4462 } 4463 } 4464 4465 /* Return the feature property name for a feature flag bit */ 4466 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4467 { 4468 const char *name; 4469 /* XSAVE components are automatically enabled by other features, 4470 * so return the original feature name instead 4471 */ 4472 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4473 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4474 4475 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4476 x86_ext_save_areas[comp].bits) { 4477 w = x86_ext_save_areas[comp].feature; 4478 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4479 } 4480 } 4481 4482 assert(bitnr < 64); 4483 assert(w < FEATURE_WORDS); 4484 name = feature_word_info[w].feat_names[bitnr]; 4485 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4486 return name; 4487 } 4488 4489 /* Compatibily hack to maintain legacy +-feat semantic, 4490 * where +-feat overwrites any feature set by 4491 * feat=on|feat even if the later is parsed after +-feat 4492 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4493 */ 4494 static GList *plus_features, *minus_features; 4495 4496 static gint compare_string(gconstpointer a, gconstpointer b) 4497 { 4498 return g_strcmp0(a, b); 4499 } 4500 4501 /* Parse "+feature,-feature,feature=foo" CPU feature string 4502 */ 4503 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4504 Error **errp) 4505 { 4506 char *featurestr; /* Single 'key=value" string being parsed */ 4507 static bool cpu_globals_initialized; 4508 bool ambiguous = false; 4509 4510 if (cpu_globals_initialized) { 4511 return; 4512 } 4513 cpu_globals_initialized = true; 4514 4515 if (!features) { 4516 return; 4517 } 4518 4519 for (featurestr = strtok(features, ","); 4520 featurestr; 4521 featurestr = strtok(NULL, ",")) { 4522 const char *name; 4523 const char *val = NULL; 4524 char *eq = NULL; 4525 char num[32]; 4526 GlobalProperty *prop; 4527 4528 /* Compatibility syntax: */ 4529 if (featurestr[0] == '+') { 4530 plus_features = g_list_append(plus_features, 4531 g_strdup(featurestr + 1)); 4532 continue; 4533 } else if (featurestr[0] == '-') { 4534 minus_features = g_list_append(minus_features, 4535 g_strdup(featurestr + 1)); 4536 continue; 4537 } 4538 4539 eq = strchr(featurestr, '='); 4540 if (eq) { 4541 *eq++ = 0; 4542 val = eq; 4543 } else { 4544 val = "on"; 4545 } 4546 4547 feat2prop(featurestr); 4548 name = featurestr; 4549 4550 if (g_list_find_custom(plus_features, name, compare_string)) { 4551 warn_report("Ambiguous CPU model string. 
" 4552 "Don't mix both \"+%s\" and \"%s=%s\"", 4553 name, name, val); 4554 ambiguous = true; 4555 } 4556 if (g_list_find_custom(minus_features, name, compare_string)) { 4557 warn_report("Ambiguous CPU model string. " 4558 "Don't mix both \"-%s\" and \"%s=%s\"", 4559 name, name, val); 4560 ambiguous = true; 4561 } 4562 4563 /* Special case: */ 4564 if (!strcmp(name, "tsc-freq")) { 4565 int ret; 4566 uint64_t tsc_freq; 4567 4568 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4569 if (ret < 0 || tsc_freq > INT64_MAX) { 4570 error_setg(errp, "bad numerical value %s", val); 4571 return; 4572 } 4573 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4574 val = num; 4575 name = "tsc-frequency"; 4576 } 4577 4578 prop = g_new0(typeof(*prop), 1); 4579 prop->driver = typename; 4580 prop->property = g_strdup(name); 4581 prop->value = g_strdup(val); 4582 qdev_prop_register_global(prop); 4583 } 4584 4585 if (ambiguous) { 4586 warn_report("Compatibility of ambiguous CPU model " 4587 "strings won't be kept on future QEMU versions"); 4588 } 4589 } 4590 4591 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4592 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4593 4594 /* Build a list with the name of all features on a feature word array */ 4595 static void x86_cpu_list_feature_names(FeatureWordArray features, 4596 strList **feat_names) 4597 { 4598 FeatureWord w; 4599 strList **next = feat_names; 4600 4601 for (w = 0; w < FEATURE_WORDS; w++) { 4602 uint64_t filtered = features[w]; 4603 int i; 4604 for (i = 0; i < 64; i++) { 4605 if (filtered & (1ULL << i)) { 4606 strList *new = g_new0(strList, 1); 4607 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4608 *next = new; 4609 next = &new->next; 4610 } 4611 } 4612 } 4613 } 4614 4615 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4616 const char *name, void *opaque, 4617 Error **errp) 4618 { 4619 X86CPU *xc = X86_CPU(obj); 4620 strList *result = NULL; 4621 4622 x86_cpu_list_feature_names(xc->filtered_features, &result); 4623 visit_type_strList(v, "unavailable-features", &result, errp); 4624 } 4625 4626 /* Check for missing features that may prevent the CPU class from 4627 * running using the current machine and accelerator. 4628 */ 4629 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4630 strList **missing_feats) 4631 { 4632 X86CPU *xc; 4633 Error *err = NULL; 4634 strList **next = missing_feats; 4635 4636 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4637 strList *new = g_new0(strList, 1); 4638 new->value = g_strdup("kvm"); 4639 *missing_feats = new; 4640 return; 4641 } 4642 4643 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 4644 4645 x86_cpu_expand_features(xc, &err); 4646 if (err) { 4647 /* Errors at x86_cpu_expand_features should never happen, 4648 * but in case it does, just report the model as not 4649 * runnable at all using the "type" property. 
4650 */ 4651 strList *new = g_new0(strList, 1); 4652 new->value = g_strdup("type"); 4653 *next = new; 4654 next = &new->next; 4655 } 4656 4657 x86_cpu_filter_features(xc, false); 4658 4659 x86_cpu_list_feature_names(xc->filtered_features, next); 4660 4661 object_unref(OBJECT(xc)); 4662 } 4663 4664 /* Print all cpuid feature names in featureset 4665 */ 4666 static void listflags(GList *features) 4667 { 4668 size_t len = 0; 4669 GList *tmp; 4670 4671 for (tmp = features; tmp; tmp = tmp->next) { 4672 const char *name = tmp->data; 4673 if ((len + strlen(name) + 1) >= 75) { 4674 qemu_printf("\n"); 4675 len = 0; 4676 } 4677 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4678 len += strlen(name) + 1; 4679 } 4680 qemu_printf("\n"); 4681 } 4682 4683 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4684 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4685 { 4686 ObjectClass *class_a = (ObjectClass *)a; 4687 ObjectClass *class_b = (ObjectClass *)b; 4688 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4689 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4690 char *name_a, *name_b; 4691 int ret; 4692 4693 if (cc_a->ordering != cc_b->ordering) { 4694 ret = cc_a->ordering - cc_b->ordering; 4695 } else { 4696 name_a = x86_cpu_class_get_model_name(cc_a); 4697 name_b = x86_cpu_class_get_model_name(cc_b); 4698 ret = strcmp(name_a, name_b); 4699 g_free(name_a); 4700 g_free(name_b); 4701 } 4702 return ret; 4703 } 4704 4705 static GSList *get_sorted_cpu_model_list(void) 4706 { 4707 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4708 list = g_slist_sort(list, x86_cpu_list_compare); 4709 return list; 4710 } 4711 4712 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4713 { 4714 Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc))); 4715 char *r = object_property_get_str(obj, "model-id", &error_abort); 4716 object_unref(obj); 4717 return r; 4718 } 4719 4720 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4721 { 4722 X86CPUVersion version; 4723 4724 if (!cc->model || !cc->model->is_alias) { 4725 return NULL; 4726 } 4727 version = x86_cpu_model_resolve_version(cc->model); 4728 if (version <= 0) { 4729 return NULL; 4730 } 4731 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4732 } 4733 4734 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4735 { 4736 ObjectClass *oc = data; 4737 X86CPUClass *cc = X86_CPU_CLASS(oc); 4738 char *name = x86_cpu_class_get_model_name(cc); 4739 char *desc = g_strdup(cc->model_description); 4740 char *alias_of = x86_cpu_class_get_alias_of(cc); 4741 4742 if (!desc && alias_of) { 4743 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4744 desc = g_strdup("(alias configured by machine type)"); 4745 } else { 4746 desc = g_strdup_printf("(alias of %s)", alias_of); 4747 } 4748 } 4749 if (!desc) { 4750 desc = x86_cpu_class_get_model_id(cc); 4751 } 4752 4753 qemu_printf("x86 %-20s %-48s\n", name, desc); 4754 g_free(name); 4755 g_free(desc); 4756 g_free(alias_of); 4757 } 4758 4759 /* list available CPU models and flags */ 4760 void x86_cpu_list(void) 4761 { 4762 int i, j; 4763 GSList *list; 4764 GList *names = NULL; 4765 4766 qemu_printf("Available CPUs:\n"); 4767 list = get_sorted_cpu_model_list(); 4768 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4769 g_slist_free(list); 4770 4771 names = NULL; 4772 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 4773 FeatureWordInfo *fw = &feature_word_info[i]; 4774 for (j = 0; j < 64; j++) { 4775 if (fw->feat_names[j]) { 4776 names = 
g_list_append(names, (gpointer)fw->feat_names[j]); 4777 } 4778 } 4779 } 4780 4781 names = g_list_sort(names, (GCompareFunc)strcmp); 4782 4783 qemu_printf("\nRecognized CPUID flags:\n"); 4784 listflags(names); 4785 qemu_printf("\n"); 4786 g_list_free(names); 4787 } 4788 4789 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 4790 { 4791 ObjectClass *oc = data; 4792 X86CPUClass *cc = X86_CPU_CLASS(oc); 4793 CpuDefinitionInfoList **cpu_list = user_data; 4794 CpuDefinitionInfoList *entry; 4795 CpuDefinitionInfo *info; 4796 4797 info = g_malloc0(sizeof(*info)); 4798 info->name = x86_cpu_class_get_model_name(cc); 4799 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 4800 info->has_unavailable_features = true; 4801 info->q_typename = g_strdup(object_class_get_name(oc)); 4802 info->migration_safe = cc->migration_safe; 4803 info->has_migration_safe = true; 4804 info->q_static = cc->static_model; 4805 /* 4806 * Old machine types won't report aliases, so that alias translation 4807 * doesn't break compatibility with previous QEMU versions. 4808 */ 4809 if (default_cpu_version != CPU_VERSION_LEGACY) { 4810 info->alias_of = x86_cpu_class_get_alias_of(cc); 4811 info->has_alias_of = !!info->alias_of; 4812 } 4813 4814 entry = g_malloc0(sizeof(*entry)); 4815 entry->value = info; 4816 entry->next = *cpu_list; 4817 *cpu_list = entry; 4818 } 4819 4820 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 4821 { 4822 CpuDefinitionInfoList *cpu_list = NULL; 4823 GSList *list = get_sorted_cpu_model_list(); 4824 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 4825 g_slist_free(list); 4826 return cpu_list; 4827 } 4828 4829 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 4830 bool migratable_only) 4831 { 4832 FeatureWordInfo *wi = &feature_word_info[w]; 4833 uint64_t r = 0; 4834 4835 if (kvm_enabled()) { 4836 switch (wi->type) { 4837 case CPUID_FEATURE_WORD: 4838 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 4839 wi->cpuid.ecx, 4840 wi->cpuid.reg); 4841 break; 4842 case MSR_FEATURE_WORD: 4843 r = kvm_arch_get_supported_msr_feature(kvm_state, 4844 wi->msr.index); 4845 break; 4846 } 4847 } else if (hvf_enabled()) { 4848 if (wi->type != CPUID_FEATURE_WORD) { 4849 return 0; 4850 } 4851 r = hvf_get_supported_cpuid(wi->cpuid.eax, 4852 wi->cpuid.ecx, 4853 wi->cpuid.reg); 4854 } else if (tcg_enabled()) { 4855 r = wi->tcg_features; 4856 } else { 4857 return ~0; 4858 } 4859 if (migratable_only) { 4860 r &= x86_cpu_get_migratable_flags(w); 4861 } 4862 return r; 4863 } 4864 4865 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 4866 { 4867 PropValue *pv; 4868 for (pv = props; pv->prop; pv++) { 4869 if (!pv->value) { 4870 continue; 4871 } 4872 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 4873 &error_abort); 4874 } 4875 } 4876 4877 /* Apply properties for the CPU model version specified in model */ 4878 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 4879 { 4880 const X86CPUVersionDefinition *vdef; 4881 X86CPUVersion version = x86_cpu_model_resolve_version(model); 4882 4883 if (version == CPU_VERSION_LEGACY) { 4884 return; 4885 } 4886 4887 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 4888 PropValue *p; 4889 4890 for (p = vdef->props; p && p->prop; p++) { 4891 object_property_parse(OBJECT(cpu), p->value, p->prop, 4892 &error_abort); 4893 } 4894 4895 if (vdef->version == version) { 4896 break; 4897 } 4898 } 4899 4900 /* 4901 * If we reached the end of the 
list, version number was invalid 4902 */ 4903 assert(vdef->version == version); 4904 } 4905 4906 /* Load data from X86CPUDefinition into a X86CPU object 4907 */ 4908 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp) 4909 { 4910 X86CPUDefinition *def = model->cpudef; 4911 CPUX86State *env = &cpu->env; 4912 const char *vendor; 4913 char host_vendor[CPUID_VENDOR_SZ + 1]; 4914 FeatureWord w; 4915 4916 /*NOTE: any property set by this function should be returned by 4917 * x86_cpu_static_props(), so static expansion of 4918 * query-cpu-model-expansion is always complete. 4919 */ 4920 4921 /* CPU models only set _minimum_ values for level/xlevel: */ 4922 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp); 4923 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp); 4924 4925 object_property_set_int(OBJECT(cpu), def->family, "family", errp); 4926 object_property_set_int(OBJECT(cpu), def->model, "model", errp); 4927 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp); 4928 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp); 4929 for (w = 0; w < FEATURE_WORDS; w++) { 4930 env->features[w] = def->features[w]; 4931 } 4932 4933 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 4934 cpu->legacy_cache = !def->cache_info; 4935 4936 /* Special cases not set in the X86CPUDefinition structs: */ 4937 /* TODO: in-kernel irqchip for hvf */ 4938 if (kvm_enabled()) { 4939 if (!kvm_irqchip_in_kernel()) { 4940 x86_cpu_change_kvm_default("x2apic", "off"); 4941 } 4942 4943 x86_cpu_apply_props(cpu, kvm_default_props); 4944 } else if (tcg_enabled()) { 4945 x86_cpu_apply_props(cpu, tcg_default_props); 4946 } 4947 4948 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 4949 4950 /* sysenter isn't supported in compatibility mode on AMD, 4951 * syscall isn't supported in compatibility mode on Intel. 4952 * Normally we advertise the actual CPU vendor, but you can 4953 * override this using the 'vendor' property if you want to use 4954 * KVM's sysenter/syscall emulation in compatibility mode and 4955 * when doing cross vendor migration 4956 */ 4957 vendor = def->vendor; 4958 if (accel_uses_host_cpuid()) { 4959 uint32_t ebx = 0, ecx = 0, edx = 0; 4960 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 4961 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 4962 vendor = host_vendor; 4963 } 4964 4965 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp); 4966 4967 x86_cpu_apply_version_props(cpu, model); 4968 } 4969 4970 #ifndef CONFIG_USER_ONLY 4971 /* Return a QDict containing keys for all properties that can be included 4972 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 4973 * must be included in the dictionary. 
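 *
 * The dict only maps property names (e.g. "family", "model", "stepping",
 * "vendor" and every known feature-flag name) to null values;
 * x86_cpu_to_dict() walks these keys and reads the current values from the
 * X86CPU object via x86_cpu_expand_prop().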
4974 */ 4975 static QDict *x86_cpu_static_props(void) 4976 { 4977 FeatureWord w; 4978 int i; 4979 static const char *props[] = { 4980 "min-level", 4981 "min-xlevel", 4982 "family", 4983 "model", 4984 "stepping", 4985 "model-id", 4986 "vendor", 4987 "lmce", 4988 NULL, 4989 }; 4990 static QDict *d; 4991 4992 if (d) { 4993 return d; 4994 } 4995 4996 d = qdict_new(); 4997 for (i = 0; props[i]; i++) { 4998 qdict_put_null(d, props[i]); 4999 } 5000 5001 for (w = 0; w < FEATURE_WORDS; w++) { 5002 FeatureWordInfo *fi = &feature_word_info[w]; 5003 int bit; 5004 for (bit = 0; bit < 64; bit++) { 5005 if (!fi->feat_names[bit]) { 5006 continue; 5007 } 5008 qdict_put_null(d, fi->feat_names[bit]); 5009 } 5010 } 5011 5012 return d; 5013 } 5014 5015 /* Add an entry to @props dict, with the value for property. */ 5016 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5017 { 5018 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5019 &error_abort); 5020 5021 qdict_put_obj(props, prop, value); 5022 } 5023 5024 /* Convert CPU model data from X86CPU object to a property dictionary 5025 * that can recreate exactly the same CPU model. 5026 */ 5027 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5028 { 5029 QDict *sprops = x86_cpu_static_props(); 5030 const QDictEntry *e; 5031 5032 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5033 const char *prop = qdict_entry_key(e); 5034 x86_cpu_expand_prop(cpu, props, prop); 5035 } 5036 } 5037 5038 /* Convert CPU model data from X86CPU object to a property dictionary 5039 * that can recreate exactly the same CPU model, including every 5040 * writeable QOM property. 5041 */ 5042 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5043 { 5044 ObjectPropertyIterator iter; 5045 ObjectProperty *prop; 5046 5047 object_property_iter_init(&iter, OBJECT(cpu)); 5048 while ((prop = object_property_iter_next(&iter))) { 5049 /* skip read-only or write-only properties */ 5050 if (!prop->get || !prop->set) { 5051 continue; 5052 } 5053 5054 /* "hotplugged" is the only property that is configurable 5055 * on the command-line but will be set differently on CPUs 5056 * created using "-cpu ... -smp ..." and by CPUs created 5057 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5058 */ 5059 if (!strcmp(prop->name, "hotplugged")) { 5060 continue; 5061 } 5062 x86_cpu_expand_prop(cpu, props, prop->name); 5063 } 5064 } 5065 5066 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5067 { 5068 const QDictEntry *prop; 5069 Error *err = NULL; 5070 5071 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5072 object_property_set_qobject(obj, qdict_entry_value(prop), 5073 qdict_entry_key(prop), &err); 5074 if (err) { 5075 break; 5076 } 5077 } 5078 5079 error_propagate(errp, err); 5080 } 5081 5082 /* Create X86CPU object according to model+props specification */ 5083 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5084 { 5085 X86CPU *xc = NULL; 5086 X86CPUClass *xcc; 5087 Error *err = NULL; 5088 5089 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5090 if (xcc == NULL) { 5091 error_setg(&err, "CPU model '%s' not found", model); 5092 goto out; 5093 } 5094 5095 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); 5096 if (props) { 5097 object_apply_props(OBJECT(xc), props, &err); 5098 if (err) { 5099 goto out; 5100 } 5101 } 5102 5103 x86_cpu_expand_features(xc, &err); 5104 if (err) { 5105 goto out; 5106 } 5107 5108 out: 5109 if (err) { 5110 error_propagate(errp, err); 5111 object_unref(OBJECT(xc)); 5112 xc = NULL; 5113 } 5114 return xc; 5115 } 5116 5117 CpuModelExpansionInfo * 5118 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5119 CpuModelInfo *model, 5120 Error **errp) 5121 { 5122 X86CPU *xc = NULL; 5123 Error *err = NULL; 5124 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5125 QDict *props = NULL; 5126 const char *base_name; 5127 5128 xc = x86_cpu_from_model(model->name, 5129 model->has_props ? 5130 qobject_to(QDict, model->props) : 5131 NULL, &err); 5132 if (err) { 5133 goto out; 5134 } 5135 5136 props = qdict_new(); 5137 ret->model = g_new0(CpuModelInfo, 1); 5138 ret->model->props = QOBJECT(props); 5139 ret->model->has_props = true; 5140 5141 switch (type) { 5142 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5143 /* Static expansion will be based on "base" only */ 5144 base_name = "base"; 5145 x86_cpu_to_dict(xc, props); 5146 break; 5147 case CPU_MODEL_EXPANSION_TYPE_FULL: 5148 /* As we don't return every single property, full expansion needs 5149 * to keep the original model name+props, and add extra 5150 * properties on top of that. 
5151 */ 5152 base_name = model->name; 5153 x86_cpu_to_dict_full(xc, props); 5154 break; 5155 default: 5156 error_setg(&err, "Unsupported expansion type"); 5157 goto out; 5158 } 5159 5160 x86_cpu_to_dict(xc, props); 5161 5162 ret->model->name = g_strdup(base_name); 5163 5164 out: 5165 object_unref(OBJECT(xc)); 5166 if (err) { 5167 error_propagate(errp, err); 5168 qapi_free_CpuModelExpansionInfo(ret); 5169 ret = NULL; 5170 } 5171 return ret; 5172 } 5173 #endif /* !CONFIG_USER_ONLY */ 5174 5175 static gchar *x86_gdb_arch_name(CPUState *cs) 5176 { 5177 #ifdef TARGET_X86_64 5178 return g_strdup("i386:x86-64"); 5179 #else 5180 return g_strdup("i386"); 5181 #endif 5182 } 5183 5184 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5185 { 5186 X86CPUModel *model = data; 5187 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5188 5189 xcc->model = model; 5190 xcc->migration_safe = true; 5191 } 5192 5193 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5194 { 5195 char *typename = x86_cpu_type_name(name); 5196 TypeInfo ti = { 5197 .name = typename, 5198 .parent = TYPE_X86_CPU, 5199 .class_init = x86_cpu_cpudef_class_init, 5200 .class_data = model, 5201 }; 5202 5203 type_register(&ti); 5204 g_free(typename); 5205 } 5206 5207 static void x86_register_cpudef_types(X86CPUDefinition *def) 5208 { 5209 X86CPUModel *m; 5210 const X86CPUVersionDefinition *vdef; 5211 char *name; 5212 5213 /* AMD aliases are handled at runtime based on CPUID vendor, so 5214 * they shouldn't be set on the CPU model table. 5215 */ 5216 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5217 /* catch mistakes instead of silently truncating model_id when too long */ 5218 assert(def->model_id && strlen(def->model_id) <= 48); 5219 5220 /* Unversioned model: */ 5221 m = g_new0(X86CPUModel, 1); 5222 m->cpudef = def; 5223 m->version = CPU_VERSION_AUTO; 5224 m->is_alias = true; 5225 x86_register_cpu_model_type(def->name, m); 5226 5227 /* Versioned models: */ 5228 5229 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5230 X86CPUModel *m = g_new0(X86CPUModel, 1); 5231 m->cpudef = def; 5232 m->version = vdef->version; 5233 name = x86_cpu_versioned_model_name(def, vdef->version); 5234 x86_register_cpu_model_type(name, m); 5235 g_free(name); 5236 5237 if (vdef->alias) { 5238 X86CPUModel *am = g_new0(X86CPUModel, 1); 5239 am->cpudef = def; 5240 am->version = vdef->version; 5241 am->is_alias = true; 5242 x86_register_cpu_model_type(vdef->alias, am); 5243 } 5244 } 5245 5246 } 5247 5248 #if !defined(CONFIG_USER_ONLY) 5249 5250 void cpu_clear_apic_feature(CPUX86State *env) 5251 { 5252 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5253 } 5254 5255 #endif /* !CONFIG_USER_ONLY */ 5256 5257 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5258 uint32_t *eax, uint32_t *ebx, 5259 uint32_t *ecx, uint32_t *edx) 5260 { 5261 X86CPU *cpu = env_archcpu(env); 5262 CPUState *cs = env_cpu(env); 5263 uint32_t die_offset; 5264 uint32_t limit; 5265 uint32_t signature[3]; 5266 5267 /* Calculate & apply limits for different index ranges */ 5268 if (index >= 0xC0000000) { 5269 limit = env->cpuid_xlevel2; 5270 } else if (index >= 0x80000000) { 5271 limit = env->cpuid_xlevel; 5272 } else if (index >= 0x40000000) { 5273 limit = 0x40000001; 5274 } else { 5275 limit = env->cpuid_level; 5276 } 5277 5278 if (index > limit) { 5279 /* Intel documentation states that invalid EAX input will 5280 * return the same information as EAX=cpuid_level 5281 * (Intel SDM Vol. 
2A - Instruction Set Reference - CPUID) 5282 */ 5283 index = env->cpuid_level; 5284 } 5285 5286 switch(index) { 5287 case 0: 5288 *eax = env->cpuid_level; 5289 *ebx = env->cpuid_vendor1; 5290 *edx = env->cpuid_vendor2; 5291 *ecx = env->cpuid_vendor3; 5292 break; 5293 case 1: 5294 *eax = env->cpuid_version; 5295 *ebx = (cpu->apic_id << 24) | 5296 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5297 *ecx = env->features[FEAT_1_ECX]; 5298 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5299 *ecx |= CPUID_EXT_OSXSAVE; 5300 } 5301 *edx = env->features[FEAT_1_EDX]; 5302 if (cs->nr_cores * cs->nr_threads > 1) { 5303 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5304 *edx |= CPUID_HT; 5305 } 5306 break; 5307 case 2: 5308 /* cache info: needed for Pentium Pro compatibility */ 5309 if (cpu->cache_info_passthrough) { 5310 host_cpuid(index, 0, eax, ebx, ecx, edx); 5311 break; 5312 } 5313 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5314 *ebx = 0; 5315 if (!cpu->enable_l3_cache) { 5316 *ecx = 0; 5317 } else { 5318 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5319 } 5320 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5321 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5322 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5323 break; 5324 case 4: 5325 /* cache info: needed for Core compatibility */ 5326 if (cpu->cache_info_passthrough) { 5327 host_cpuid(index, count, eax, ebx, ecx, edx); 5328 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ 5329 *eax &= ~0xFC000000; 5330 if ((*eax & 31) && cs->nr_cores > 1) { 5331 *eax |= (cs->nr_cores - 1) << 26; 5332 } 5333 } else { 5334 *eax = 0; 5335 switch (count) { 5336 case 0: /* L1 dcache info */ 5337 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5338 1, cs->nr_cores, 5339 eax, ebx, ecx, edx); 5340 break; 5341 case 1: /* L1 icache info */ 5342 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5343 1, cs->nr_cores, 5344 eax, ebx, ecx, edx); 5345 break; 5346 case 2: /* L2 cache info */ 5347 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5348 cs->nr_threads, cs->nr_cores, 5349 eax, ebx, ecx, edx); 5350 break; 5351 case 3: /* L3 cache info */ 5352 die_offset = apicid_die_offset(env->nr_dies, 5353 cs->nr_cores, cs->nr_threads); 5354 if (cpu->enable_l3_cache) { 5355 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5356 (1 << die_offset), cs->nr_cores, 5357 eax, ebx, ecx, edx); 5358 break; 5359 } 5360 /* fall through */ 5361 default: /* end of info */ 5362 *eax = *ebx = *ecx = *edx = 0; 5363 break; 5364 } 5365 } 5366 break; 5367 case 5: 5368 /* MONITOR/MWAIT Leaf */ 5369 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5370 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5371 *ecx = cpu->mwait.ecx; /* flags */ 5372 *edx = cpu->mwait.edx; /* mwait substates */ 5373 break; 5374 case 6: 5375 /* Thermal and Power Leaf */ 5376 *eax = env->features[FEAT_6_EAX]; 5377 *ebx = 0; 5378 *ecx = 0; 5379 *edx = 0; 5380 break; 5381 case 7: 5382 /* Structured Extended Feature Flags Enumeration Leaf */ 5383 if (count == 0) { 5384 /* Maximum ECX value for sub-leaves */ 5385 *eax = env->cpuid_level_func7; 5386 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5387 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5388 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5389 *ecx |= CPUID_7_0_ECX_OSPKE; 5390 } 5391 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5392 } 
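/*
 * Like OSXSAVE in leaf 1 above, OSPKE is derived from CR4 at CPUID read
 * time and is not stored in env->features.
 */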
else if (count == 1) { 5393 *eax = env->features[FEAT_7_1_EAX]; 5394 *ebx = 0; 5395 *ecx = 0; 5396 *edx = 0; 5397 } else { 5398 *eax = 0; 5399 *ebx = 0; 5400 *ecx = 0; 5401 *edx = 0; 5402 } 5403 break; 5404 case 9: 5405 /* Direct Cache Access Information Leaf */ 5406 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5407 *ebx = 0; 5408 *ecx = 0; 5409 *edx = 0; 5410 break; 5411 case 0xA: 5412 /* Architectural Performance Monitoring Leaf */ 5413 if (kvm_enabled() && cpu->enable_pmu) { 5414 KVMState *s = cs->kvm_state; 5415 5416 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5417 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5418 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5419 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5420 } else if (hvf_enabled() && cpu->enable_pmu) { 5421 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5422 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5423 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5424 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5425 } else { 5426 *eax = 0; 5427 *ebx = 0; 5428 *ecx = 0; 5429 *edx = 0; 5430 } 5431 break; 5432 case 0xB: 5433 /* Extended Topology Enumeration Leaf */ 5434 if (!cpu->enable_cpuid_0xb) { 5435 *eax = *ebx = *ecx = *edx = 0; 5436 break; 5437 } 5438 5439 *ecx = count & 0xff; 5440 *edx = cpu->apic_id; 5441 5442 switch (count) { 5443 case 0: 5444 *eax = apicid_core_offset(env->nr_dies, 5445 cs->nr_cores, cs->nr_threads); 5446 *ebx = cs->nr_threads; 5447 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5448 break; 5449 case 1: 5450 *eax = apicid_pkg_offset(env->nr_dies, 5451 cs->nr_cores, cs->nr_threads); 5452 *ebx = cs->nr_cores * cs->nr_threads; 5453 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5454 break; 5455 default: 5456 *eax = 0; 5457 *ebx = 0; 5458 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5459 } 5460 5461 assert(!(*eax & ~0x1f)); 5462 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5463 break; 5464 case 0x1F: 5465 /* V2 Extended Topology Enumeration Leaf */ 5466 if (env->nr_dies < 2) { 5467 *eax = *ebx = *ecx = *edx = 0; 5468 break; 5469 } 5470 5471 *ecx = count & 0xff; 5472 *edx = cpu->apic_id; 5473 switch (count) { 5474 case 0: 5475 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores, 5476 cs->nr_threads); 5477 *ebx = cs->nr_threads; 5478 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5479 break; 5480 case 1: 5481 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores, 5482 cs->nr_threads); 5483 *ebx = cs->nr_cores * cs->nr_threads; 5484 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5485 break; 5486 case 2: 5487 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores, 5488 cs->nr_threads); 5489 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5490 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5491 break; 5492 default: 5493 *eax = 0; 5494 *ebx = 0; 5495 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5496 } 5497 assert(!(*eax & ~0x1f)); 5498 *ebx &= 0xffff; /* The count doesn't need to be reliable. 
*/ 5499 break; 5500 case 0xD: { 5501 /* Processor Extended State */ 5502 *eax = 0; 5503 *ebx = 0; 5504 *ecx = 0; 5505 *edx = 0; 5506 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5507 break; 5508 } 5509 5510 if (count == 0) { 5511 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5512 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5513 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5514 /* 5515 * The initial value of xcr0 and ebx == 0, On host without kvm 5516 * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 5517 * even through guest update xcr0, this will crash some legacy guest 5518 * (e.g., CentOS 6), So set ebx == ecx to workaroud it. 5519 */ 5520 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5521 } else if (count == 1) { 5522 *eax = env->features[FEAT_XSAVE]; 5523 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5524 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5525 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5526 *eax = esa->size; 5527 *ebx = esa->offset; 5528 } 5529 } 5530 break; 5531 } 5532 case 0x14: { 5533 /* Intel Processor Trace Enumeration */ 5534 *eax = 0; 5535 *ebx = 0; 5536 *ecx = 0; 5537 *edx = 0; 5538 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5539 !kvm_enabled()) { 5540 break; 5541 } 5542 5543 if (count == 0) { 5544 *eax = INTEL_PT_MAX_SUBLEAF; 5545 *ebx = INTEL_PT_MINIMAL_EBX; 5546 *ecx = INTEL_PT_MINIMAL_ECX; 5547 } else if (count == 1) { 5548 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5549 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5550 } 5551 break; 5552 } 5553 case 0x40000000: 5554 /* 5555 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5556 * set here, but we restrict to TCG none the less. 5557 */ 5558 if (tcg_enabled() && cpu->expose_tcg) { 5559 memcpy(signature, "TCGTCGTCGTCG", 12); 5560 *eax = 0x40000001; 5561 *ebx = signature[0]; 5562 *ecx = signature[1]; 5563 *edx = signature[2]; 5564 } else { 5565 *eax = 0; 5566 *ebx = 0; 5567 *ecx = 0; 5568 *edx = 0; 5569 } 5570 break; 5571 case 0x40000001: 5572 *eax = 0; 5573 *ebx = 0; 5574 *ecx = 0; 5575 *edx = 0; 5576 break; 5577 case 0x80000000: 5578 *eax = env->cpuid_xlevel; 5579 *ebx = env->cpuid_vendor1; 5580 *edx = env->cpuid_vendor2; 5581 *ecx = env->cpuid_vendor3; 5582 break; 5583 case 0x80000001: 5584 *eax = env->cpuid_version; 5585 *ebx = 0; 5586 *ecx = env->features[FEAT_8000_0001_ECX]; 5587 *edx = env->features[FEAT_8000_0001_EDX]; 5588 5589 /* The Linux kernel checks for the CMPLegacy bit and 5590 * discards multiple thread information if it is set. 5591 * So don't set it here for Intel to make Linux guests happy. 
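 * (CmpLegacy is CPUID.80000001H:ECX bit 1; the code below only sets it
 * when more than one thread/core is configured and the vendor is not
 * Intel.)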
5592 */ 5593 if (cs->nr_cores * cs->nr_threads > 1) { 5594 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5595 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5596 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5597 *ecx |= 1 << 1; /* CmpLegacy bit */ 5598 } 5599 } 5600 break; 5601 case 0x80000002: 5602 case 0x80000003: 5603 case 0x80000004: 5604 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5605 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5606 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5607 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5608 break; 5609 case 0x80000005: 5610 /* cache info (L1 cache) */ 5611 if (cpu->cache_info_passthrough) { 5612 host_cpuid(index, 0, eax, ebx, ecx, edx); 5613 break; 5614 } 5615 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ 5616 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5617 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ 5618 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5619 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5620 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5621 break; 5622 case 0x80000006: 5623 /* cache info (L2 cache) */ 5624 if (cpu->cache_info_passthrough) { 5625 host_cpuid(index, 0, eax, ebx, ecx, edx); 5626 break; 5627 } 5628 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ 5629 (L2_DTLB_2M_ENTRIES << 16) | \ 5630 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ 5631 (L2_ITLB_2M_ENTRIES); 5632 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ 5633 (L2_DTLB_4K_ENTRIES << 16) | \ 5634 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ 5635 (L2_ITLB_4K_ENTRIES); 5636 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5637 cpu->enable_l3_cache ? 5638 env->cache_info_amd.l3_cache : NULL, 5639 ecx, edx); 5640 break; 5641 case 0x80000007: 5642 *eax = 0; 5643 *ebx = 0; 5644 *ecx = 0; 5645 *edx = env->features[FEAT_8000_0007_EDX]; 5646 break; 5647 case 0x80000008: 5648 /* virtual & phys address size in low 2 bytes. 
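 * EAX[7:0] reports the physical address width and EAX[15:8] the linear
 * (virtual) address width, hence the 0x00003000 (48 << 8) and
 * 0x00003900 (57 << 8) values below.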
*/ 5649 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5650 /* 64 bit processor */ 5651 *eax = cpu->phys_bits; /* configurable physical bits */ 5652 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5653 *eax |= 0x00003900; /* 57 bits virtual */ 5654 } else { 5655 *eax |= 0x00003000; /* 48 bits virtual */ 5656 } 5657 } else { 5658 *eax = cpu->phys_bits; 5659 } 5660 *ebx = env->features[FEAT_8000_0008_EBX]; 5661 *ecx = 0; 5662 *edx = 0; 5663 if (cs->nr_cores * cs->nr_threads > 1) { 5664 *ecx |= (cs->nr_cores * cs->nr_threads) - 1; 5665 } 5666 break; 5667 case 0x8000000A: 5668 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5669 *eax = 0x00000001; /* SVM Revision */ 5670 *ebx = 0x00000010; /* nr of ASIDs */ 5671 *ecx = 0; 5672 *edx = env->features[FEAT_SVM]; /* optional features */ 5673 } else { 5674 *eax = 0; 5675 *ebx = 0; 5676 *ecx = 0; 5677 *edx = 0; 5678 } 5679 break; 5680 case 0x8000001D: 5681 *eax = 0; 5682 if (cpu->cache_info_passthrough) { 5683 host_cpuid(index, count, eax, ebx, ecx, edx); 5684 break; 5685 } 5686 switch (count) { 5687 case 0: /* L1 dcache info */ 5688 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs, 5689 eax, ebx, ecx, edx); 5690 break; 5691 case 1: /* L1 icache info */ 5692 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs, 5693 eax, ebx, ecx, edx); 5694 break; 5695 case 2: /* L2 cache info */ 5696 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs, 5697 eax, ebx, ecx, edx); 5698 break; 5699 case 3: /* L3 cache info */ 5700 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs, 5701 eax, ebx, ecx, edx); 5702 break; 5703 default: /* end of info */ 5704 *eax = *ebx = *ecx = *edx = 0; 5705 break; 5706 } 5707 break; 5708 case 0x8000001E: 5709 assert(cpu->core_id <= 255); 5710 encode_topo_cpuid8000001e(cs, cpu, 5711 eax, ebx, ecx, edx); 5712 break; 5713 case 0xC0000000: 5714 *eax = env->cpuid_xlevel2; 5715 *ebx = 0; 5716 *ecx = 0; 5717 *edx = 0; 5718 break; 5719 case 0xC0000001: 5720 /* Support for VIA CPU's CPUID instruction */ 5721 *eax = env->cpuid_version; 5722 *ebx = 0; 5723 *ecx = 0; 5724 *edx = env->features[FEAT_C000_0001_EDX]; 5725 break; 5726 case 0xC0000002: 5727 case 0xC0000003: 5728 case 0xC0000004: 5729 /* Reserved for the future, and now filled with zero */ 5730 *eax = 0; 5731 *ebx = 0; 5732 *ecx = 0; 5733 *edx = 0; 5734 break; 5735 case 0x8000001F: 5736 *eax = sev_enabled() ? 
0x2 : 0; 5737 *ebx = sev_get_cbit_position(); 5738 *ebx |= sev_get_reduced_phys_bits() << 6; 5739 *ecx = 0; 5740 *edx = 0; 5741 break; 5742 default: 5743 /* reserved values: zero */ 5744 *eax = 0; 5745 *ebx = 0; 5746 *ecx = 0; 5747 *edx = 0; 5748 break; 5749 } 5750 } 5751 5752 /* CPUClass::reset() */ 5753 static void x86_cpu_reset(CPUState *s) 5754 { 5755 X86CPU *cpu = X86_CPU(s); 5756 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 5757 CPUX86State *env = &cpu->env; 5758 target_ulong cr4; 5759 uint64_t xcr0; 5760 int i; 5761 5762 xcc->parent_reset(s); 5763 5764 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 5765 5766 env->old_exception = -1; 5767 5768 /* init to reset state */ 5769 5770 env->hflags2 |= HF2_GIF_MASK; 5771 5772 cpu_x86_update_cr0(env, 0x60000010); 5773 env->a20_mask = ~0x0; 5774 env->smbase = 0x30000; 5775 env->msr_smi_count = 0; 5776 5777 env->idt.limit = 0xffff; 5778 env->gdt.limit = 0xffff; 5779 env->ldt.limit = 0xffff; 5780 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 5781 env->tr.limit = 0xffff; 5782 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 5783 5784 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 5785 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 5786 DESC_R_MASK | DESC_A_MASK); 5787 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 5788 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5789 DESC_A_MASK); 5790 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 5791 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5792 DESC_A_MASK); 5793 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 5794 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5795 DESC_A_MASK); 5796 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 5797 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5798 DESC_A_MASK); 5799 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 5800 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5801 DESC_A_MASK); 5802 5803 env->eip = 0xfff0; 5804 env->regs[R_EDX] = env->cpuid_version; 5805 5806 env->eflags = 0x2; 5807 5808 /* FPU init */ 5809 for (i = 0; i < 8; i++) { 5810 env->fptags[i] = 1; 5811 } 5812 cpu_set_fpuc(env, 0x37f); 5813 5814 env->mxcsr = 0x1f80; 5815 /* All units are in INIT state. */ 5816 env->xstate_bv = 0; 5817 5818 env->pat = 0x0007040600070406ULL; 5819 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 5820 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 5821 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 5822 } 5823 5824 memset(env->dr, 0, sizeof(env->dr)); 5825 env->dr[6] = DR6_FIXED_1; 5826 env->dr[7] = DR7_FIXED_1; 5827 cpu_breakpoint_remove_all(s, BP_CPU); 5828 cpu_watchpoint_remove_all(s, BP_CPU); 5829 5830 cr4 = 0; 5831 xcr0 = XSTATE_FP_MASK; 5832 5833 #ifdef CONFIG_USER_ONLY 5834 /* Enable all the features for user-mode. */ 5835 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 5836 xcr0 |= XSTATE_SSE_MASK; 5837 } 5838 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 5839 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 5840 if (env->features[esa->feature] & esa->bits) { 5841 xcr0 |= 1ull << i; 5842 } 5843 } 5844 5845 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 5846 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 5847 } 5848 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 5849 cr4 |= CR4_FSGSBASE_MASK; 5850 } 5851 #endif 5852 5853 env->xcr0 = xcr0; 5854 cpu_x86_update_cr4(env, cr4); 5855 5856 /* 5857 * SDM 11.11.5 requires: 5858 * - IA32_MTRR_DEF_TYPE MSR.E = 0 5859 * - IA32_MTRR_PHYSMASKn.V = 0 5860 * All other bits are undefined. For simplification, zero it all. 
5861 */ 5862 env->mtrr_deftype = 0; 5863 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 5864 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 5865 5866 env->interrupt_injected = -1; 5867 env->exception_nr = -1; 5868 env->exception_pending = 0; 5869 env->exception_injected = 0; 5870 env->exception_has_payload = false; 5871 env->exception_payload = 0; 5872 env->nmi_injected = false; 5873 #if !defined(CONFIG_USER_ONLY) 5874 /* We hard-wire the BSP to the first CPU. */ 5875 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 5876 5877 s->halted = !cpu_is_bsp(cpu); 5878 5879 if (kvm_enabled()) { 5880 kvm_arch_reset_vcpu(cpu); 5881 } 5882 else if (hvf_enabled()) { 5883 hvf_reset_vcpu(s); 5884 } 5885 #endif 5886 } 5887 5888 #ifndef CONFIG_USER_ONLY 5889 bool cpu_is_bsp(X86CPU *cpu) 5890 { 5891 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 5892 } 5893 5894 /* TODO: remove me, when reset over QOM tree is implemented */ 5895 static void x86_cpu_machine_reset_cb(void *opaque) 5896 { 5897 X86CPU *cpu = opaque; 5898 cpu_reset(CPU(cpu)); 5899 } 5900 #endif 5901 5902 static void mce_init(X86CPU *cpu) 5903 { 5904 CPUX86State *cenv = &cpu->env; 5905 unsigned int bank; 5906 5907 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 5908 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 5909 (CPUID_MCE | CPUID_MCA)) { 5910 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 5911 (cpu->enable_lmce ? MCG_LMCE_P : 0); 5912 cenv->mcg_ctl = ~(uint64_t)0; 5913 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 5914 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 5915 } 5916 } 5917 } 5918 5919 #ifndef CONFIG_USER_ONLY 5920 APICCommonClass *apic_get_class(void) 5921 { 5922 const char *apic_type = "apic"; 5923 5924 /* TODO: in-kernel irqchip for hvf */ 5925 if (kvm_apic_in_kernel()) { 5926 apic_type = "kvm-apic"; 5927 } else if (xen_enabled()) { 5928 apic_type = "xen-apic"; 5929 } 5930 5931 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 5932 } 5933 5934 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 5935 { 5936 APICCommonState *apic; 5937 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 5938 5939 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class))); 5940 5941 object_property_add_child(OBJECT(cpu), "lapic", 5942 OBJECT(cpu->apic_state), &error_abort); 5943 object_unref(OBJECT(cpu->apic_state)); 5944 5945 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 5946 /* TODO: convert to link<> */ 5947 apic = APIC_COMMON(cpu->apic_state); 5948 apic->cpu = cpu; 5949 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 5950 } 5951 5952 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 5953 { 5954 APICCommonState *apic; 5955 static bool apic_mmio_map_once; 5956 5957 if (cpu->apic_state == NULL) { 5958 return; 5959 } 5960 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized", 5961 errp); 5962 5963 /* Map APIC MMIO area */ 5964 apic = APIC_COMMON(cpu->apic_state); 5965 if (!apic_mmio_map_once) { 5966 memory_region_add_subregion_overlap(get_system_memory(), 5967 apic->apicbase & 5968 MSR_IA32_APICBASE_BASE, 5969 &apic->io_memory, 5970 0x1000); 5971 apic_mmio_map_once = true; 5972 } 5973 } 5974 5975 static void x86_cpu_machine_done(Notifier *n, void *unused) 5976 { 5977 X86CPU *cpu = container_of(n, X86CPU, machine_done); 5978 MemoryRegion *smram = 5979 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 5980 5981 if (smram) { 5982 cpu->smram = g_new(MemoryRegion, 1); 5983 memory_region_init_alias(cpu->smram, 
OBJECT(cpu), "smram", 5984 smram, 0, 1ull << 32); 5985 memory_region_set_enabled(cpu->smram, true); 5986 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 5987 } 5988 } 5989 #else 5990 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 5991 { 5992 } 5993 #endif 5994 5995 /* Note: Only safe for use on x86(-64) hosts */ 5996 static uint32_t x86_host_phys_bits(void) 5997 { 5998 uint32_t eax; 5999 uint32_t host_phys_bits; 6000 6001 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6002 if (eax >= 0x80000008) { 6003 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6004 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6005 * at 23:16 that can specify a maximum physical address bits for 6006 * the guest that can override this value; but I've not seen 6007 * anything with that set. 6008 */ 6009 host_phys_bits = eax & 0xff; 6010 } else { 6011 /* It's an odd 64 bit machine that doesn't have the leaf for 6012 * physical address bits; fall back to 36 that's most older 6013 * Intel. 6014 */ 6015 host_phys_bits = 36; 6016 } 6017 6018 return host_phys_bits; 6019 } 6020 6021 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6022 { 6023 if (*min < value) { 6024 *min = value; 6025 } 6026 } 6027 6028 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6029 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6030 { 6031 CPUX86State *env = &cpu->env; 6032 FeatureWordInfo *fi = &feature_word_info[w]; 6033 uint32_t eax = fi->cpuid.eax; 6034 uint32_t region = eax & 0xF0000000; 6035 6036 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6037 if (!env->features[w]) { 6038 return; 6039 } 6040 6041 switch (region) { 6042 case 0x00000000: 6043 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6044 break; 6045 case 0x80000000: 6046 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6047 break; 6048 case 0xC0000000: 6049 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6050 break; 6051 } 6052 6053 if (eax == 7) { 6054 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6055 fi->cpuid.ecx); 6056 } 6057 } 6058 6059 /* Calculate XSAVE components based on the configured CPU feature flags */ 6060 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6061 { 6062 CPUX86State *env = &cpu->env; 6063 int i; 6064 uint64_t mask; 6065 6066 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6067 return; 6068 } 6069 6070 mask = 0; 6071 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6072 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6073 if (env->features[esa->feature] & esa->bits) { 6074 mask |= (1ULL << i); 6075 } 6076 } 6077 6078 env->features[FEAT_XSAVE_COMP_LO] = mask; 6079 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6080 } 6081 6082 /***** Steps involved on loading and filtering CPUID data 6083 * 6084 * When initializing and realizing a CPU object, the steps 6085 * involved in setting up CPUID data are: 6086 * 6087 * 1) Loading CPU model definition (X86CPUDefinition). This is 6088 * implemented by x86_cpu_load_model() and should be completely 6089 * transparent, as it is done automatically by instance_init. 6090 * No code should need to look at X86CPUDefinition structs 6091 * outside instance_init. 6092 * 6093 * 2) CPU expansion. This is done by realize before CPUID 6094 * filtering, and will make sure host/accelerator data is 6095 * loaded for CPU models that depend on host capabilities 6096 * (e.g. "host"). Done by x86_cpu_expand_features(). 
6097 * 6098 * 3) CPUID filtering. This initializes extra data related to 6099 * CPUID, and checks if the host supports all capabilities 6100 * required by the CPU. Runnability of a CPU model is 6101 * determined at this step. Done by x86_cpu_filter_features(). 6102 * 6103 * Some operations don't require all steps to be performed. 6104 * More precisely: 6105 * 6106 * - CPU instance creation (instance_init) will run only CPU 6107 * model loading. CPU expansion can't run at instance_init-time 6108 * because host/accelerator data may be not available yet. 6109 * - CPU realization will perform both CPU model expansion and CPUID 6110 * filtering, and return an error in case one of them fails. 6111 * - query-cpu-definitions needs to run all 3 steps. It needs 6112 * to run CPUID filtering, as the 'unavailable-features' 6113 * field is set based on the filtering results. 6114 * - The query-cpu-model-expansion QMP command only needs to run 6115 * CPU model loading and CPU expansion. It should not filter 6116 * any CPUID data based on host capabilities. 6117 */ 6118 6119 /* Expand CPU configuration data, based on configured features 6120 * and host/accelerator capabilities when appropriate. 6121 */ 6122 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6123 { 6124 CPUX86State *env = &cpu->env; 6125 FeatureWord w; 6126 int i; 6127 GList *l; 6128 Error *local_err = NULL; 6129 6130 for (l = plus_features; l; l = l->next) { 6131 const char *prop = l->data; 6132 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 6133 if (local_err) { 6134 goto out; 6135 } 6136 } 6137 6138 for (l = minus_features; l; l = l->next) { 6139 const char *prop = l->data; 6140 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 6141 if (local_err) { 6142 goto out; 6143 } 6144 } 6145 6146 /*TODO: Now cpu->max_features doesn't overwrite features 6147 * set using QOM properties, and we can convert 6148 * plus_features & minus_features to global properties 6149 * inside x86_cpu_parse_featurestr() too. 6150 */ 6151 if (cpu->max_features) { 6152 for (w = 0; w < FEATURE_WORDS; w++) { 6153 /* Override only features that weren't set explicitly 6154 * by the user. 6155 */ 6156 env->features[w] |= 6157 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6158 ~env->user_features[w] & \ 6159 ~feature_word_info[w].no_autoenable_flags; 6160 } 6161 } 6162 6163 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6164 FeatureDep *d = &feature_dependencies[i]; 6165 if (!(env->features[d->from.index] & d->from.mask)) { 6166 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6167 6168 /* Not an error unless the dependent feature was added explicitly. 
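 * If the prerequisite feature (d->from) is absent, the dependent bits
 * (d->to) are cleared silently; only bits the user explicitly requested
 * are reported via mark_unavailable_features() below.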
*/ 6169 mark_unavailable_features(cpu, d->to.index, 6170 unavailable_features & env->user_features[d->to.index], 6171 "This feature depends on other features that were not requested"); 6172 6173 env->user_features[d->to.index] |= unavailable_features; 6174 env->features[d->to.index] &= ~unavailable_features; 6175 } 6176 } 6177 6178 if (!kvm_enabled() || !cpu->expose_kvm) { 6179 env->features[FEAT_KVM] = 0; 6180 } 6181 6182 x86_cpu_enable_xsave_components(cpu); 6183 6184 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6185 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6186 if (cpu->full_cpuid_auto_level) { 6187 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6188 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6189 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6190 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6191 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6192 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6193 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6194 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6195 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6196 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6197 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6198 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6199 6200 /* Intel Processor Trace requires CPUID[0x14] */ 6201 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6202 kvm_enabled() && cpu->intel_pt_auto_level) { 6203 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6204 } 6205 6206 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6207 if (env->nr_dies > 1) { 6208 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6209 } 6210 6211 /* SVM requires CPUID[0x8000000A] */ 6212 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6213 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6214 } 6215 6216 /* SEV requires CPUID[0x8000001F] */ 6217 if (sev_enabled()) { 6218 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6219 } 6220 } 6221 6222 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6223 if (env->cpuid_level_func7 == UINT32_MAX) { 6224 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6225 } 6226 if (env->cpuid_level == UINT32_MAX) { 6227 env->cpuid_level = env->cpuid_min_level; 6228 } 6229 if (env->cpuid_xlevel == UINT32_MAX) { 6230 env->cpuid_xlevel = env->cpuid_min_xlevel; 6231 } 6232 if (env->cpuid_xlevel2 == UINT32_MAX) { 6233 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6234 } 6235 6236 out: 6237 if (local_err != NULL) { 6238 error_propagate(errp, local_err); 6239 } 6240 } 6241 6242 /* 6243 * Finishes initialization of CPUID data, filters CPU feature 6244 * words based on host availability of each feature. 6245 * 6246 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6247 */ 6248 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6249 { 6250 CPUX86State *env = &cpu->env; 6251 FeatureWord w; 6252 const char *prefix = NULL; 6253 6254 if (verbose) { 6255 prefix = accel_uses_host_cpuid() 6256 ? 
"host doesn't support requested feature" 6257 : "TCG doesn't support requested feature"; 6258 } 6259 6260 for (w = 0; w < FEATURE_WORDS; w++) { 6261 uint64_t host_feat = 6262 x86_cpu_get_supported_feature_word(w, false); 6263 uint64_t requested_features = env->features[w]; 6264 uint64_t unavailable_features = requested_features & ~host_feat; 6265 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6266 } 6267 6268 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6269 kvm_enabled()) { 6270 KVMState *s = CPU(cpu)->kvm_state; 6271 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6272 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6273 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6274 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6275 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6276 6277 if (!eax_0 || 6278 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6279 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6280 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6281 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6282 INTEL_PT_ADDR_RANGES_NUM) || 6283 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6284 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6285 (ecx_0 & INTEL_PT_IP_LIP)) { 6286 /* 6287 * Processor Trace capabilities aren't configurable, so if the 6288 * host can't emulate the capabilities we report on 6289 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6290 */ 6291 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6292 } 6293 } 6294 } 6295 6296 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6297 { 6298 CPUState *cs = CPU(dev); 6299 X86CPU *cpu = X86_CPU(dev); 6300 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6301 CPUX86State *env = &cpu->env; 6302 Error *local_err = NULL; 6303 static bool ht_warned; 6304 6305 if (xcc->host_cpuid_required) { 6306 if (!accel_uses_host_cpuid()) { 6307 char *name = x86_cpu_class_get_model_name(xcc); 6308 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6309 g_free(name); 6310 goto out; 6311 } 6312 6313 if (enable_cpu_pm) { 6314 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6315 &cpu->mwait.ecx, &cpu->mwait.edx); 6316 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6317 } 6318 } 6319 6320 /* mwait extended info: needed for Core compatibility */ 6321 /* We always wake on interrupt even if host does not have the capability */ 6322 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6323 6324 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6325 error_setg(errp, "apic-id property was not initialized properly"); 6326 return; 6327 } 6328 6329 x86_cpu_expand_features(cpu, &local_err); 6330 if (local_err) { 6331 goto out; 6332 } 6333 6334 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6335 6336 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6337 error_setg(&local_err, 6338 accel_uses_host_cpuid() ? 6339 "Host doesn't support requested features" : 6340 "TCG doesn't support requested features"); 6341 goto out; 6342 } 6343 6344 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6345 * CPUID[1].EDX. 
6346 */ 6347 if (IS_AMD_CPU(env)) { 6348 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6349 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6350 & CPUID_EXT2_AMD_ALIASES); 6351 } 6352 6353 /* For 64bit systems think about the number of physical bits to present. 6354 * ideally this should be the same as the host; anything other than matching 6355 * the host can cause incorrect guest behaviour. 6356 * QEMU used to pick the magic value of 40 bits that corresponds to 6357 * consumer AMD devices but nothing else. 6358 */ 6359 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6360 if (accel_uses_host_cpuid()) { 6361 uint32_t host_phys_bits = x86_host_phys_bits(); 6362 static bool warned; 6363 6364 /* Print a warning if the user set it to a value that's not the 6365 * host value. 6366 */ 6367 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6368 !warned) { 6369 warn_report("Host physical bits (%u)" 6370 " does not match phys-bits property (%u)", 6371 host_phys_bits, cpu->phys_bits); 6372 warned = true; 6373 } 6374 6375 if (cpu->host_phys_bits) { 6376 /* The user asked for us to use the host physical bits */ 6377 cpu->phys_bits = host_phys_bits; 6378 if (cpu->host_phys_bits_limit && 6379 cpu->phys_bits > cpu->host_phys_bits_limit) { 6380 cpu->phys_bits = cpu->host_phys_bits_limit; 6381 } 6382 } 6383 6384 if (cpu->phys_bits && 6385 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6386 cpu->phys_bits < 32)) { 6387 error_setg(errp, "phys-bits should be between 32 and %u " 6388 " (but is %u)", 6389 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6390 return; 6391 } 6392 } else { 6393 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6394 error_setg(errp, "TCG only supports phys-bits=%u", 6395 TCG_PHYS_ADDR_BITS); 6396 return; 6397 } 6398 } 6399 /* 0 means it was not explicitly set by the user (or by machine 6400 * compat_props or by the host code above). In this case, the default 6401 * is the value used by TCG (40). 6402 */ 6403 if (cpu->phys_bits == 0) { 6404 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6405 } 6406 } else { 6407 /* For 32 bit systems don't use the user set value, but keep 6408 * phys_bits consistent with what we tell the guest. 
6409 */ 6410 if (cpu->phys_bits != 0) { 6411 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 6412 return; 6413 } 6414 6415 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 6416 cpu->phys_bits = 36; 6417 } else { 6418 cpu->phys_bits = 32; 6419 } 6420 } 6421 6422 /* Cache information initialization */ 6423 if (!cpu->legacy_cache) { 6424 if (!xcc->model || !xcc->model->cpudef->cache_info) { 6425 char *name = x86_cpu_class_get_model_name(xcc); 6426 error_setg(errp, 6427 "CPU model '%s' doesn't support legacy-cache=off", name); 6428 g_free(name); 6429 return; 6430 } 6431 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = 6432 *xcc->model->cpudef->cache_info; 6433 } else { 6434 /* Build legacy cache information */ 6435 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; 6436 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; 6437 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; 6438 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; 6439 6440 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; 6441 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; 6442 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; 6443 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; 6444 6445 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; 6446 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; 6447 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; 6448 env->cache_info_amd.l3_cache = &legacy_l3_cache; 6449 } 6450 6451 6452 cpu_exec_realizefn(cs, &local_err); 6453 if (local_err != NULL) { 6454 error_propagate(errp, local_err); 6455 return; 6456 } 6457 6458 #ifndef CONFIG_USER_ONLY 6459 MachineState *ms = MACHINE(qdev_get_machine()); 6460 qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 6461 6462 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { 6463 x86_cpu_apic_create(cpu, &local_err); 6464 if (local_err != NULL) { 6465 goto out; 6466 } 6467 } 6468 #endif 6469 6470 mce_init(cpu); 6471 6472 #ifndef CONFIG_USER_ONLY 6473 if (tcg_enabled()) { 6474 cpu->cpu_as_mem = g_new(MemoryRegion, 1); 6475 cpu->cpu_as_root = g_new(MemoryRegion, 1); 6476 6477 /* Outer container... */ 6478 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 6479 memory_region_set_enabled(cpu->cpu_as_root, true); 6480 6481 /* ... with two regions inside: normal system memory with low 6482 * priority, and... 6483 */ 6484 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 6485 get_system_memory(), 0, ~0ull); 6486 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 6487 memory_region_set_enabled(cpu->cpu_as_mem, true); 6488 6489 cs->num_ases = 2; 6490 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); 6491 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); 6492 6493 /* ... SMRAM with higher priority, linked from /machine/smram. */ 6494 cpu->machine_done.notify = x86_cpu_machine_done; 6495 qemu_add_machine_init_done_notifier(&cpu->machine_done); 6496 } 6497 #endif 6498 6499 qemu_init_vcpu(cs); 6500 6501 /* 6502 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU 6503 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX 6504 * based on inputs (sockets,cores,threads), it is still better to give 6505 * users a warning. 6506 * 6507 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 6508 * cs->nr_threads hasn't be populated yet and the checking is incorrect. 
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

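/*
 * Example: the "topoext" feature name maps to a BitProperty with
 * .w = FEAT_8000_0001_ECX and .mask = CPUID_EXT3_TOPOEXT, so
 * "-cpu ...,topoext=on" sets that bit in env->features[] and also records
 * it in env->user_features[], letting later feature filtering know the bit
 * was chosen explicitly by the user.
 */
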
/* Register a boolean property to get/set a single bit in a feature word.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}

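/*
 * instance_init for TYPE_X86_CPU: registers the QOM properties shared by all
 * models ("family", "model", "stepping", "vendor", the per-feature bit
 * properties and their legacy underscore aliases) and, for concrete
 * subclasses, loads the model's default feature set.
 */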
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
                              &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}

static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

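/*
 * Recompute the derived bits of env->hflags (CPL, PE, MP/EM/TS, TF, VM,
 * IOPL, OSFXSR, LMA, CS32/SS32/CS64, ADDSEG) from the current segment
 * descriptors, CR0/CR4, EFER and EFLAGS; all other hflags bits are
 * preserved via HFLAG_COPY_MASK.
 */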
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

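/*
 * qdev properties common to every X86CPU model.  The "hv-*" bit properties
 * select individual Hyper-V enlightenments in X86CPU::hyperv_features; the
 * "x-" prefix marks properties that are not meant as stable user interfaces.
 */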
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
                            hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
                       UINT32_MAX),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_model()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

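/*
 * Register the abstract TYPE_X86_CPU type, one concrete subclass per entry
 * in builtin_x86_defs, plus the special "max", "base" and (with KVM or HVF)
 * "host" CPU model types.
 */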
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)