/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"

#include "cpu.h"
#include "tcg/tcg-cpu.h"
#include "tcg/helper-tcg.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "sysemu/whpx.h"
#include "kvm/kvm_i386.h"
#include "sev_i386.h"

#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#include "qapi/qapi-commands-machine-target.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif

#include "disas/capstone.h"

/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x29 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF

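/*
 * Informal example, derived from the table above rather than from the SDM:
 * a unified 2 MiB level-2 cache with 64-byte lines and 8-way associativity
 * matches entry [0x7D], so the lookup helper below returns 0x7D for it;
 * a geometry with no matching entry yields CACHE_DESCRIPTOR_UNAVAILABLE.
 */
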
/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

#define CACHE_LEVEL(l)        (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING   (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)


/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)

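/*
 * Worked examples (illustrative only, using hypothetical cache geometries):
 * - encode_cache_cpuid4() for a 32 KiB, 8-way, self-initializing L1 data
 *   cache with 64-byte lines, 1 partition and 64 sets, private to a
 *   single-threaded core, produces
 *     EAX = CACHE_TYPE_D | CACHE_LEVEL(1) | CACHE_SELF_INIT_LEVEL = 0x121
 *     EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22)          = 0x01c0003f
 *     ECX = 64 - 1                                                = 63
 * - encode_cache_cpuid80000005() for a 64 KiB, 2-way L1 cache with 64-byte
 *   lines and one line per tag produces
 *     (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140
 * - AMD_ENC_ASSOC(16) == 0x8, as used for CPUID[0x80000006] below.
 */
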
/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}

/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
                                       X86CPUTopoInfo *topo_info,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_threads;
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_threads = topo_info->cores_per_die * topo_info->threads_per_core;
        *eax |= (l3_threads - 1) << 14;
    } else {
        *eax |= ((topo_info->threads_per_core - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode topology info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    X86CPUTopoIDs topo_ids;

    x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);

    *eax = cpu->apic_id;

    /*
     * CPUID_Fn8000001E_EBX [Core Identifiers] (CoreId)
     * Read-only. Reset: 0000_XXXXh.
     * See Core::X86::Cpuid::ExtApicId.
     * Core::X86::Cpuid::CoreId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits   Description
     * 31:16  Reserved.
     * 15:8   ThreadsPerCore: threads per core. Read-only. Reset: XXh.
     *        The number of threads per core is ThreadsPerCore+1.
     * 7:0    CoreId: core ID. Read-only. Reset: XXh.
     *
     * NOTE: CoreId is already part of apic_id. Just use it. We can
     * use all the 8 bits to represent the core_id here.
     */
    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.core_id & 0xFF);

    /*
     * CPUID_Fn8000001E_ECX [Node Identifiers] (NodeId)
     * Read-only. Reset: 0000_0XXXh.
     * Core::X86::Cpuid::NodeId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits   Description
     * 31:11  Reserved.
     * 10:8   NodesPerProcessor: Nodes per processor. Read-only. Reset: XXXb.
     *        ValidValues:
     *        Value      Description
     *        000b       1 node per processor.
     *        001b       2 nodes per processor.
     *        010b       Reserved.
     *        011b       4 nodes per processor.
     *        111b-100b  Reserved.
     * 7:0    NodeId: Node ID. Read-only. Reset: XXh.
     *
     * NOTE: Hardware reserves 3 bits for the number of nodes per processor.
     * But users can create more nodes than the actual hardware can
     * support. To generalize we can use all the upper 8 bits for nodes.
     * NodeId is a combination of node and socket_id, which is already decoded
     * in apic_id. Just use it by shifting.
     */
    *ecx = ((topo_info->dies_per_pkg - 1) << 8) |
           ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF);

    *edx = 0;
}

/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */

static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}

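/*
 * For instance, the "GenuineIntel" vendor string corresponds to
 * EBX = 0x756e6547 ("Genu"), EDX = 0x49656e69 ("ineI"), ECX = 0x6c65746e
 * ("ntel"); callers pass the registers in EBX/EDX/ECX order so that
 * x86_cpu_vendor_words2str() reassembles the 12-character string.
 */
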
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
#define TCG_14_0_ECX_FEATURES 0

typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flag names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[64];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
        } msr;
    };
    uint64_t tcg_features; /* Feature flags supported by TCG */
    uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint64_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint64_t no_autoenable_flags;
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined in feature_name[] but
     * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names in feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", "kvm-msi-ext-dest-id",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled on the command line.
         */
        .no_autoenable_flags = ~0U,
    },
    /*
     * .feat_names are commented out for Hyper-V enlightenments because we
     * don't want to have two different ways for enabling them on QEMU command
     * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
     * enabling several feature bits simultaneously; exposing these bits
     * individually may just confuse guests.
     */
    [FEAT_HYPERV_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
    },
    [FEAT_HYPERV_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
    },
    [FEAT_HYPERV_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
    },
    [FEAT_HV_RECOMM_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_recommend_pv_as_switch */,
            NULL /* hv_recommend_pv_tlbflush_local */,
            NULL /* hv_recommend_pv_tlbflush_remote */,
            NULL /* hv_recommend_msr_apic_access */,
            NULL /* hv_recommend_msr_reset */,
            NULL /* hv_recommend_relaxed_timing */,
            NULL /* hv_recommend_dma_remapping */,
            NULL /* hv_recommend_int_remapping */,
            NULL /* hv_recommend_x2apic_msrs */,
            NULL /* hv_recommend_autoeoi_deprecation */,
            NULL /* hv_recommend_pv_ipi */,
            NULL /* hv_recommend_ex_hypercalls */,
            NULL /* hv_hypervisor_is_nested */,
            NULL /* hv_recommend_int_mbec */,
            NULL /* hv_recommend_evmcs */,
            NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
    },
    [FEAT_HV_NESTED_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
    },
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, "movdiri",
            "movdir64b", NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            "fsrm", NULL, NULL, NULL,
            "avx512-vp2intersect", NULL, "md-clear", NULL,
            NULL, NULL, "serialize", NULL,
            "tsx-ldtrk", NULL, NULL /* pconfig */, NULL,
            NULL, NULL, NULL, "avx512-fp16",
            NULL, NULL, "spec-ctrl", "stibp",
            NULL, "arch-capabilities", "core-capability", "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_7_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "avx512-bf16", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_7_1_EAX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "clzero", NULL, "xsaveerptr", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, NULL, "amd-stibp",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /* Below are MSR exposed features */
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
            "taa-no", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
        },
    },
    [FEAT_PERF_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "full-width-write", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_PERF_CAPABILITIES,
        },
    },

    [FEAT_VMX_PROCBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
            NULL, NULL, NULL, "vmx-hlt-exit",
            NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
            "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
            "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
            "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
            "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
            "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
        }
    },

    [FEAT_VMX_SECONDARY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
            "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
            "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
            "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
            "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
            "vmx-xsaves", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_PROCBASED_CTLS2,
        }
    },

    [FEAT_VMX_PINBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
            NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        }
    },

    [FEAT_VMX_EXIT_CTLS] = {
        .type = MSR_FEATURE_WORD,
        /*
         * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
         * the LM CPUID bit.
         */
        .feat_names = {
            NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
            "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
            NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
            "vmx-exit-save-efer", "vmx-exit-load-efer",
            "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
            NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
        }
    },

    [FEAT_VMX_ENTRY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-entry-noload-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-entry-ia32e-mode", NULL, NULL,
            NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
            "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
        }
    },

    [FEAT_VMX_MISC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
            "vmx-activity-wait-sipi", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_MISC,
        }
    },

    [FEAT_VMX_EPT_VPID_CAPS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-ept-execonly", NULL, NULL, NULL,
            NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
            "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
            NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid-single-addr", "vmx-invept-single-context",
            "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_EPT_VPID_CAP,
        }
    },

    [FEAT_VMX_BASIC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [54] = "vmx-ins-outs",
            [55] = "vmx-true-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_BASIC,
        },
        /* Just to be safe - we don't support setting the MSEG version field. */
        .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
    },

    [FEAT_VMX_VMFUNC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [0] = "vmx-eptp-switching",
        },
        .msr = {
            .index = MSR_IA32_VMX_VMFUNC,
        }
    },

    [FEAT_14_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, "intel-pt-lip",
        },
        .cpuid = {
            .eax = 0x14,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_14_0_ECX_FEATURES,
    },

};

typedef struct FeatureMask {
    FeatureWord index;
    uint64_t mask;
} FeatureMask;

typedef struct FeatureDep {
    FeatureMask from, to;
} FeatureDep;

static FeatureDep feature_dependencies[] = {
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
        .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
        .to = { FEAT_CORE_CAPABILITY, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
        .to = { FEAT_PERF_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_MISC, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_BASIC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
        .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
    },
    {
        .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
        .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT },
        .to = { FEAT_14_0_ECX, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
        .to = { FEAT_VMX_VMFUNC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM },
        .to = { FEAT_SVM, ~0ull },
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

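/*
 * Rough example (assuming the architectural 512-byte legacy region and
 * 64-byte XSAVE header): xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK)
 * evaluates to sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), i.e.
 * 576 bytes, and adding XSTATE_YMM_MASK grows the result to cover avx_state.
 */
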
/* Return name of 32-bit register, from a R_* constant */
static const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i++) {
        uint64_t f = 1ULL << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

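/*
 * Example (arithmetic only): a raw CPUID.01H EAX value of 0x000906ea decodes
 * here to family 6 + 0 = 6, model 0xe | 0x90 = 0x9e (158), stepping 0xa.
 */
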
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = x86_cpu_type_name(cpu_model);
    return object_class_by_name(typename);
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

typedef struct PropValue {
    const char *prop, *value;
} PropValue;

typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    const char *note;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
    const char *deprecation_note;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    const char *note;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    return def->versions ?: default_version_list;
}

static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

static CPUCaches epyc_rome_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 16 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 16384,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

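/*
 * Sanity note: the geometries above satisfy the assertion in
 * encode_cache_cpuid4()/encode_cache_cpuid8000001d(); e.g. for the EPYC L3,
 * 64-byte lines * 16 ways * 1 partition * 8192 sets = 8 MiB.
 */
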
Conceal VM entries from PT 1816 * Enable ENCLS exiting 1817 * Mode-based execute control (XS/XU) 1818 * TSC scaling (Skylake Server and newer) 1819 * GPA translation for PT (IceLake and newer) 1820 * User wait and pause 1821 * ENCLV exiting 1822 * Load IA32_RTIT_CTL 1823 * Clear IA32_RTIT_CTL 1824 * Advanced VM-exit information for EPT violations 1825 * Sub-page write permissions 1826 * PT in VMX operation 1827 */ 1828 1829 static X86CPUDefinition builtin_x86_defs[] = { 1830 { 1831 .name = "qemu64", 1832 .level = 0xd, 1833 .vendor = CPUID_VENDOR_AMD, 1834 .family = 6, 1835 .model = 6, 1836 .stepping = 3, 1837 .features[FEAT_1_EDX] = 1838 PPRO_FEATURES | 1839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1840 CPUID_PSE36, 1841 .features[FEAT_1_ECX] = 1842 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1843 .features[FEAT_8000_0001_EDX] = 1844 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1845 .features[FEAT_8000_0001_ECX] = 1846 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1847 .xlevel = 0x8000000A, 1848 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1849 }, 1850 { 1851 .name = "phenom", 1852 .level = 5, 1853 .vendor = CPUID_VENDOR_AMD, 1854 .family = 16, 1855 .model = 2, 1856 .stepping = 3, 1857 /* Missing: CPUID_HT */ 1858 .features[FEAT_1_EDX] = 1859 PPRO_FEATURES | 1860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1861 CPUID_PSE36 | CPUID_VME, 1862 .features[FEAT_1_ECX] = 1863 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1864 CPUID_EXT_POPCNT, 1865 .features[FEAT_8000_0001_EDX] = 1866 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1867 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1868 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1869 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1870 CPUID_EXT3_CR8LEG, 1871 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1872 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1873 .features[FEAT_8000_0001_ECX] = 1874 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1875 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1876 /* Missing: CPUID_SVM_LBRV */ 1877 .features[FEAT_SVM] = 1878 CPUID_SVM_NPT, 1879 .xlevel = 0x8000001A, 1880 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1881 }, 1882 { 1883 .name = "core2duo", 1884 .level = 10, 1885 .vendor = CPUID_VENDOR_INTEL, 1886 .family = 6, 1887 .model = 15, 1888 .stepping = 11, 1889 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1890 .features[FEAT_1_EDX] = 1891 PPRO_FEATURES | 1892 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1893 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1894 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1895 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1896 .features[FEAT_1_ECX] = 1897 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1898 CPUID_EXT_CX16, 1899 .features[FEAT_8000_0001_EDX] = 1900 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1901 .features[FEAT_8000_0001_ECX] = 1902 CPUID_EXT3_LAHF_LM, 1903 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1904 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1905 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1906 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1907 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1908 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1909 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1910 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1911 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1912 VMX_CPU_BASED_RDPMC_EXITING |
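/*
 * Illustrative note (editorial assumption, not part of the original file):
 * each builtin_x86_defs[] entry above is selectable by its .name, e.g.
 * "-cpu qemu64" or "-cpu core2duo" on the QEMU command line, and qemu64 is
 * the model typically used when no -cpu option is given.  The .level and
 * .xlevel fields bound the largest basic (0x0000_00xx) and extended
 * (0x8000_00xx) CPUID leaves the guest sees for that model.
 */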
VMX_CPU_BASED_RDTSC_EXITING | 1913 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1914 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1915 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1916 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1917 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1918 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1919 .features[FEAT_VMX_SECONDARY_CTLS] = 1920 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1921 .xlevel = 0x80000008, 1922 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1923 }, 1924 { 1925 .name = "kvm64", 1926 .level = 0xd, 1927 .vendor = CPUID_VENDOR_INTEL, 1928 .family = 15, 1929 .model = 6, 1930 .stepping = 1, 1931 /* Missing: CPUID_HT */ 1932 .features[FEAT_1_EDX] = 1933 PPRO_FEATURES | CPUID_VME | 1934 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1935 CPUID_PSE36, 1936 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1937 .features[FEAT_1_ECX] = 1938 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1939 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1940 .features[FEAT_8000_0001_EDX] = 1941 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1942 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1943 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1944 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1945 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1946 .features[FEAT_8000_0001_ECX] = 1947 0, 1948 /* VMX features from Cedar Mill/Prescott */ 1949 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1950 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1951 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1952 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1953 VMX_PIN_BASED_NMI_EXITING, 1954 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1955 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1956 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1957 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1958 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1959 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1960 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1961 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1962 .xlevel = 0x80000008, 1963 .model_id = "Common KVM processor" 1964 }, 1965 { 1966 .name = "qemu32", 1967 .level = 4, 1968 .vendor = CPUID_VENDOR_INTEL, 1969 .family = 6, 1970 .model = 6, 1971 .stepping = 3, 1972 .features[FEAT_1_EDX] = 1973 PPRO_FEATURES, 1974 .features[FEAT_1_ECX] = 1975 CPUID_EXT_SSE3, 1976 .xlevel = 0x80000004, 1977 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1978 }, 1979 { 1980 .name = "kvm32", 1981 .level = 5, 1982 .vendor = CPUID_VENDOR_INTEL, 1983 .family = 15, 1984 .model = 6, 1985 .stepping = 1, 1986 .features[FEAT_1_EDX] = 1987 PPRO_FEATURES | CPUID_VME | 1988 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1989 .features[FEAT_1_ECX] = 1990 CPUID_EXT_SSE3, 1991 .features[FEAT_8000_0001_ECX] = 1992 0, 1993 /* VMX features from Yonah */ 1994 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1995 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1996 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1997 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1998 VMX_PIN_BASED_NMI_EXITING, 1999 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2000 
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2001 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2002 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2003 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2004 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2005 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2006 .xlevel = 0x80000008, 2007 .model_id = "Common 32-bit KVM processor" 2008 }, 2009 { 2010 .name = "coreduo", 2011 .level = 10, 2012 .vendor = CPUID_VENDOR_INTEL, 2013 .family = 6, 2014 .model = 14, 2015 .stepping = 8, 2016 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2017 .features[FEAT_1_EDX] = 2018 PPRO_FEATURES | CPUID_VME | 2019 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2020 CPUID_SS, 2021 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2022 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2023 .features[FEAT_1_ECX] = 2024 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2025 .features[FEAT_8000_0001_EDX] = 2026 CPUID_EXT2_NX, 2027 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2028 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2029 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2030 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2031 VMX_PIN_BASED_NMI_EXITING, 2032 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2033 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2034 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2035 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2036 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2037 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2038 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2039 .xlevel = 0x80000008, 2040 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2041 }, 2042 { 2043 .name = "486", 2044 .level = 1, 2045 .vendor = CPUID_VENDOR_INTEL, 2046 .family = 4, 2047 .model = 8, 2048 .stepping = 0, 2049 .features[FEAT_1_EDX] = 2050 I486_FEATURES, 2051 .xlevel = 0, 2052 .model_id = "", 2053 }, 2054 { 2055 .name = "pentium", 2056 .level = 1, 2057 .vendor = CPUID_VENDOR_INTEL, 2058 .family = 5, 2059 .model = 4, 2060 .stepping = 3, 2061 .features[FEAT_1_EDX] = 2062 PENTIUM_FEATURES, 2063 .xlevel = 0, 2064 .model_id = "", 2065 }, 2066 { 2067 .name = "pentium2", 2068 .level = 2, 2069 .vendor = CPUID_VENDOR_INTEL, 2070 .family = 6, 2071 .model = 5, 2072 .stepping = 2, 2073 .features[FEAT_1_EDX] = 2074 PENTIUM2_FEATURES, 2075 .xlevel = 0, 2076 .model_id = "", 2077 }, 2078 { 2079 .name = "pentium3", 2080 .level = 3, 2081 .vendor = CPUID_VENDOR_INTEL, 2082 .family = 6, 2083 .model = 7, 2084 .stepping = 3, 2085 .features[FEAT_1_EDX] = 2086 PENTIUM3_FEATURES, 2087 .xlevel = 0, 2088 .model_id = "", 2089 }, 2090 { 2091 .name = "athlon", 2092 .level = 2, 2093 .vendor = CPUID_VENDOR_AMD, 2094 .family = 6, 2095 .model = 2, 2096 .stepping = 3, 2097 .features[FEAT_1_EDX] = 2098 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2099 CPUID_MCA, 2100 .features[FEAT_8000_0001_EDX] = 2101 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2102 .xlevel = 0x80000008, 2103 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2104 }, 2105 { 2106 .name = "n270", 2107 .level = 10, 2108 .vendor = CPUID_VENDOR_INTEL, 2109 .family = 6, 2110 .model = 28, 2111 .stepping = 2, 2112 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2113 .features[FEAT_1_EDX] = 2114 PPRO_FEATURES | 2115 
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2116 CPUID_ACPI | CPUID_SS, 2117 /* Some CPUs got no CPUID_SEP */ 2118 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2119 * CPUID_EXT_XTPR */ 2120 .features[FEAT_1_ECX] = 2121 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2122 CPUID_EXT_MOVBE, 2123 .features[FEAT_8000_0001_EDX] = 2124 CPUID_EXT2_NX, 2125 .features[FEAT_8000_0001_ECX] = 2126 CPUID_EXT3_LAHF_LM, 2127 .xlevel = 0x80000008, 2128 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2129 }, 2130 { 2131 .name = "Conroe", 2132 .level = 10, 2133 .vendor = CPUID_VENDOR_INTEL, 2134 .family = 6, 2135 .model = 15, 2136 .stepping = 3, 2137 .features[FEAT_1_EDX] = 2138 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2142 CPUID_DE | CPUID_FP87, 2143 .features[FEAT_1_ECX] = 2144 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2145 .features[FEAT_8000_0001_EDX] = 2146 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2147 .features[FEAT_8000_0001_ECX] = 2148 CPUID_EXT3_LAHF_LM, 2149 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2150 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2151 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2152 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2153 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2154 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2155 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2156 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2157 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2158 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2159 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2160 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2161 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2162 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2163 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2164 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2165 .features[FEAT_VMX_SECONDARY_CTLS] = 2166 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2167 .xlevel = 0x80000008, 2168 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2169 }, 2170 { 2171 .name = "Penryn", 2172 .level = 10, 2173 .vendor = CPUID_VENDOR_INTEL, 2174 .family = 6, 2175 .model = 23, 2176 .stepping = 3, 2177 .features[FEAT_1_EDX] = 2178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2182 CPUID_DE | CPUID_FP87, 2183 .features[FEAT_1_ECX] = 2184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2185 CPUID_EXT_SSE3, 2186 .features[FEAT_8000_0001_EDX] = 2187 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2188 .features[FEAT_8000_0001_ECX] = 2189 CPUID_EXT3_LAHF_LM, 2190 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2191 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2192 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2193 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2194 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2195 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2196 
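/*
 * Illustrative note (editorial assumption, not from the original source):
 * each .features[FEAT_*] word is a bit mask for one CPUID register or
 * capability MSR; the FEAT_VMX_* words correspond to the IA32_VMX_*
 * capability MSRs, so the VMX_CPU_BASED_* bits collected in
 * FEAT_VMX_PROCBASED_CTLS are the processor-based VM-execution controls a
 * model advertises as available, minus the items listed as unsupported in
 * the comment above builtin_x86_defs[].
 */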
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2197 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2198 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2199 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2200 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2201 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2202 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2203 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2204 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2205 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2206 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2207 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2208 .features[FEAT_VMX_SECONDARY_CTLS] = 2209 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2210 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2211 .xlevel = 0x80000008, 2212 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2213 }, 2214 { 2215 .name = "Nehalem", 2216 .level = 11, 2217 .vendor = CPUID_VENDOR_INTEL, 2218 .family = 6, 2219 .model = 26, 2220 .stepping = 3, 2221 .features[FEAT_1_EDX] = 2222 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2223 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2224 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2225 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2226 CPUID_DE | CPUID_FP87, 2227 .features[FEAT_1_ECX] = 2228 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2229 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2230 .features[FEAT_8000_0001_EDX] = 2231 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2232 .features[FEAT_8000_0001_ECX] = 2233 CPUID_EXT3_LAHF_LM, 2234 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2235 MSR_VMX_BASIC_TRUE_CTLS, 2236 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2237 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2238 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2239 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2240 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2241 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2242 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2243 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2244 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2245 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2246 .features[FEAT_VMX_EXIT_CTLS] = 2247 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2248 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2249 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2250 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2251 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2252 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2253 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2254 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2255 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2256 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2257 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2258 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2259 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2260 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2261 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2262 VMX_CPU_BASED_UNCOND_IO_EXITING | 
VMX_CPU_BASED_USE_IO_BITMAPS | 2263 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2264 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2265 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2266 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2267 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2268 .features[FEAT_VMX_SECONDARY_CTLS] = 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2270 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2271 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2272 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2273 VMX_SECONDARY_EXEC_ENABLE_VPID, 2274 .xlevel = 0x80000008, 2275 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2276 .versions = (X86CPUVersionDefinition[]) { 2277 { .version = 1 }, 2278 { 2279 .version = 2, 2280 .alias = "Nehalem-IBRS", 2281 .props = (PropValue[]) { 2282 { "spec-ctrl", "on" }, 2283 { "model-id", 2284 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { /* end of list */ } 2289 } 2290 }, 2291 { 2292 .name = "Westmere", 2293 .level = 11, 2294 .vendor = CPUID_VENDOR_INTEL, 2295 .family = 6, 2296 .model = 44, 2297 .stepping = 1, 2298 .features[FEAT_1_EDX] = 2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2303 CPUID_DE | CPUID_FP87, 2304 .features[FEAT_1_ECX] = 2305 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2308 .features[FEAT_8000_0001_EDX] = 2309 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2310 .features[FEAT_8000_0001_ECX] = 2311 CPUID_EXT3_LAHF_LM, 2312 .features[FEAT_6_EAX] = 2313 CPUID_6_EAX_ARAT, 2314 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2315 MSR_VMX_BASIC_TRUE_CTLS, 2316 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2317 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2318 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2319 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2320 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2321 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2322 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2323 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2324 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2325 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2326 .features[FEAT_VMX_EXIT_CTLS] = 2327 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2328 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2329 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2330 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2331 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2332 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2333 MSR_VMX_MISC_STORE_LMA, 2334 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2335 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2336 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2337 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2338 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2339 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2340 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2341 
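/*
 * Illustrative note (editorial, based on x86_cpu_versioned_model_name()
 * and the Nehalem .versions list above): a definition with versions
 * { 1 } and { 2, .alias = "Nehalem-IBRS" } is exposed as the models
 * "Nehalem-v1" and "Nehalem-v2" (the "%s-v%d" format), usable as e.g.
 * "-cpu Nehalem-v2"; the alias name resolves to the same model and, per
 * the X86CPUModel comment, only changes how "-cpu help" and
 * query-cpu-definitions report it.
 */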
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2342 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2343 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2344 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2345 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2346 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2347 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2348 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2349 .features[FEAT_VMX_SECONDARY_CTLS] = 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2351 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2352 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2353 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2354 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2355 .xlevel = 0x80000008, 2356 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2357 .versions = (X86CPUVersionDefinition[]) { 2358 { .version = 1 }, 2359 { 2360 .version = 2, 2361 .alias = "Westmere-IBRS", 2362 .props = (PropValue[]) { 2363 { "spec-ctrl", "on" }, 2364 { "model-id", 2365 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { /* end of list */ } 2370 } 2371 }, 2372 { 2373 .name = "SandyBridge", 2374 .level = 0xd, 2375 .vendor = CPUID_VENDOR_INTEL, 2376 .family = 6, 2377 .model = 42, 2378 .stepping = 1, 2379 .features[FEAT_1_EDX] = 2380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2384 CPUID_DE | CPUID_FP87, 2385 .features[FEAT_1_ECX] = 2386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2387 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2388 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2389 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2390 CPUID_EXT_SSE3, 2391 .features[FEAT_8000_0001_EDX] = 2392 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2393 CPUID_EXT2_SYSCALL, 2394 .features[FEAT_8000_0001_ECX] = 2395 CPUID_EXT3_LAHF_LM, 2396 .features[FEAT_XSAVE] = 2397 CPUID_XSAVE_XSAVEOPT, 2398 .features[FEAT_6_EAX] = 2399 CPUID_6_EAX_ARAT, 2400 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2401 MSR_VMX_BASIC_TRUE_CTLS, 2402 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2403 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2404 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2405 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2406 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2407 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2408 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2409 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2410 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2411 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2412 .features[FEAT_VMX_EXIT_CTLS] = 2413 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2414 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2415 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2416 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2417 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2418 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2419 MSR_VMX_MISC_STORE_LMA, 2420 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2421 
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2422 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2423 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2424 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2425 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2426 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2427 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2428 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2429 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2430 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2431 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2432 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2433 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2434 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2435 .features[FEAT_VMX_SECONDARY_CTLS] = 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2437 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2438 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2439 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2440 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2441 .xlevel = 0x80000008, 2442 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2443 .versions = (X86CPUVersionDefinition[]) { 2444 { .version = 1 }, 2445 { 2446 .version = 2, 2447 .alias = "SandyBridge-IBRS", 2448 .props = (PropValue[]) { 2449 { "spec-ctrl", "on" }, 2450 { "model-id", 2451 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { /* end of list */ } 2456 } 2457 }, 2458 { 2459 .name = "IvyBridge", 2460 .level = 0xd, 2461 .vendor = CPUID_VENDOR_INTEL, 2462 .family = 6, 2463 .model = 58, 2464 .stepping = 9, 2465 .features[FEAT_1_EDX] = 2466 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2467 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2468 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2469 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2470 CPUID_DE | CPUID_FP87, 2471 .features[FEAT_1_ECX] = 2472 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2473 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2474 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2475 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2476 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2477 .features[FEAT_7_0_EBX] = 2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2479 CPUID_7_0_EBX_ERMS, 2480 .features[FEAT_8000_0001_EDX] = 2481 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2482 CPUID_EXT2_SYSCALL, 2483 .features[FEAT_8000_0001_ECX] = 2484 CPUID_EXT3_LAHF_LM, 2485 .features[FEAT_XSAVE] = 2486 CPUID_XSAVE_XSAVEOPT, 2487 .features[FEAT_6_EAX] = 2488 CPUID_6_EAX_ARAT, 2489 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2490 MSR_VMX_BASIC_TRUE_CTLS, 2491 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2492 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2493 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2494 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2495 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2496 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2497 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2498 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2499 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2500 
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2501 .features[FEAT_VMX_EXIT_CTLS] = 2502 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2503 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2504 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2505 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2506 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2507 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2508 MSR_VMX_MISC_STORE_LMA, 2509 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2510 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2511 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2512 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2513 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2514 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2515 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2516 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2517 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2518 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2519 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2520 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2521 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2522 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2523 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2524 .features[FEAT_VMX_SECONDARY_CTLS] = 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2526 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2527 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2528 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2529 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2530 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2531 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2532 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2533 .xlevel = 0x80000008, 2534 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2535 .versions = (X86CPUVersionDefinition[]) { 2536 { .version = 1 }, 2537 { 2538 .version = 2, 2539 .alias = "IvyBridge-IBRS", 2540 .props = (PropValue[]) { 2541 { "spec-ctrl", "on" }, 2542 { "model-id", 2543 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { /* end of list */ } 2548 } 2549 }, 2550 { 2551 .name = "Haswell", 2552 .level = 0xd, 2553 .vendor = CPUID_VENDOR_INTEL, 2554 .family = 6, 2555 .model = 60, 2556 .stepping = 4, 2557 .features[FEAT_1_EDX] = 2558 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2559 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2560 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2561 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2562 CPUID_DE | CPUID_FP87, 2563 .features[FEAT_1_ECX] = 2564 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2565 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2566 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2567 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2568 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2569 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2570 .features[FEAT_8000_0001_EDX] = 2571 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2572 CPUID_EXT2_SYSCALL, 2573 .features[FEAT_8000_0001_ECX] = 2574 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2575 .features[FEAT_7_0_EBX] = 2576 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2577 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2578 
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2579 CPUID_7_0_EBX_RTM, 2580 .features[FEAT_XSAVE] = 2581 CPUID_XSAVE_XSAVEOPT, 2582 .features[FEAT_6_EAX] = 2583 CPUID_6_EAX_ARAT, 2584 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2585 MSR_VMX_BASIC_TRUE_CTLS, 2586 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2587 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2588 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2589 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2590 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2591 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2592 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2593 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2594 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2595 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2596 .features[FEAT_VMX_EXIT_CTLS] = 2597 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2598 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2599 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2600 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2601 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2602 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2603 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2604 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2605 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2606 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2607 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2608 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2609 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2610 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2611 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2612 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2613 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2614 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2615 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2616 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2617 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2618 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2619 .features[FEAT_VMX_SECONDARY_CTLS] = 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2621 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2622 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2623 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2624 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2625 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2626 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2627 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2628 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2629 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2630 .xlevel = 0x80000008, 2631 .model_id = "Intel Core Processor (Haswell)", 2632 .versions = (X86CPUVersionDefinition[]) { 2633 { .version = 1 }, 2634 { 2635 .version = 2, 2636 .alias = "Haswell-noTSX", 2637 .props = (PropValue[]) { 2638 { "hle", "off" }, 2639 { "rtm", "off" }, 2640 { "stepping", "1" }, 2641 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2642 { /* end of list */ } 2643 }, 2644 }, 2645 { 2646 .version = 3, 2647 .alias = "Haswell-IBRS", 2648 .props = (PropValue[]) { 2649 /* 
Restore TSX features removed by -v2 above */ 2650 { "hle", "on" }, 2651 { "rtm", "on" }, 2652 /* 2653 * Haswell and Haswell-IBRS had stepping=4 in 2654 * QEMU 4.0 and older 2655 */ 2656 { "stepping", "4" }, 2657 { "spec-ctrl", "on" }, 2658 { "model-id", 2659 "Intel Core Processor (Haswell, IBRS)" }, 2660 { /* end of list */ } 2661 } 2662 }, 2663 { 2664 .version = 4, 2665 .alias = "Haswell-noTSX-IBRS", 2666 .props = (PropValue[]) { 2667 { "hle", "off" }, 2668 { "rtm", "off" }, 2669 /* spec-ctrl was already enabled by -v3 above */ 2670 { "stepping", "1" }, 2671 { "model-id", 2672 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { /* end of list */ } 2677 } 2678 }, 2679 { 2680 .name = "Broadwell", 2681 .level = 0xd, 2682 .vendor = CPUID_VENDOR_INTEL, 2683 .family = 6, 2684 .model = 61, 2685 .stepping = 2, 2686 .features[FEAT_1_EDX] = 2687 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2688 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2689 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2690 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2691 CPUID_DE | CPUID_FP87, 2692 .features[FEAT_1_ECX] = 2693 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2694 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2695 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2696 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2697 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2698 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2699 .features[FEAT_8000_0001_EDX] = 2700 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2701 CPUID_EXT2_SYSCALL, 2702 .features[FEAT_8000_0001_ECX] = 2703 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2704 .features[FEAT_7_0_EBX] = 2705 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2706 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2707 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2708 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2709 CPUID_7_0_EBX_SMAP, 2710 .features[FEAT_XSAVE] = 2711 CPUID_XSAVE_XSAVEOPT, 2712 .features[FEAT_6_EAX] = 2713 CPUID_6_EAX_ARAT, 2714 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2715 MSR_VMX_BASIC_TRUE_CTLS, 2716 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2717 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2718 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2719 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2720 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2721 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2722 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2723 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2724 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2725 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2726 .features[FEAT_VMX_EXIT_CTLS] = 2727 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2728 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2729 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2730 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2731 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2732 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2733 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2734 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2735 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2736 
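/*
 * Illustrative note (editorial assumption inferred from the comments in
 * the Haswell .versions list above): per-version .props are applied
 * cumulatively on top of earlier versions, which is why Haswell-v3 has to
 * switch "hle"/"rtm" back on after -v2 turned them off, and why -v4 can
 * rely on "spec-ctrl" already being enabled by -v3.
 */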
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2737 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2738 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2739 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2740 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2741 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2742 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2743 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2744 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2745 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2746 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2747 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2748 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2749 .features[FEAT_VMX_SECONDARY_CTLS] = 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2751 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2752 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2753 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2754 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2755 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2756 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2757 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2758 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2759 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2760 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2761 .xlevel = 0x80000008, 2762 .model_id = "Intel Core Processor (Broadwell)", 2763 .versions = (X86CPUVersionDefinition[]) { 2764 { .version = 1 }, 2765 { 2766 .version = 2, 2767 .alias = "Broadwell-noTSX", 2768 .props = (PropValue[]) { 2769 { "hle", "off" }, 2770 { "rtm", "off" }, 2771 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2772 { /* end of list */ } 2773 }, 2774 }, 2775 { 2776 .version = 3, 2777 .alias = "Broadwell-IBRS", 2778 .props = (PropValue[]) { 2779 /* Restore TSX features removed by -v2 above */ 2780 { "hle", "on" }, 2781 { "rtm", "on" }, 2782 { "spec-ctrl", "on" }, 2783 { "model-id", 2784 "Intel Core Processor (Broadwell, IBRS)" }, 2785 { /* end of list */ } 2786 } 2787 }, 2788 { 2789 .version = 4, 2790 .alias = "Broadwell-noTSX-IBRS", 2791 .props = (PropValue[]) { 2792 { "hle", "off" }, 2793 { "rtm", "off" }, 2794 /* spec-ctrl was already enabled by -v3 above */ 2795 { "model-id", 2796 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { /* end of list */ } 2801 } 2802 }, 2803 { 2804 .name = "Skylake-Client", 2805 .level = 0xd, 2806 .vendor = CPUID_VENDOR_INTEL, 2807 .family = 6, 2808 .model = 94, 2809 .stepping = 3, 2810 .features[FEAT_1_EDX] = 2811 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2812 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2813 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2814 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2815 CPUID_DE | CPUID_FP87, 2816 .features[FEAT_1_ECX] = 2817 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2818 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2819 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2820 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2821 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2822 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2823 .features[FEAT_8000_0001_EDX] = 2824 CPUID_EXT2_LM | 
CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2825 CPUID_EXT2_SYSCALL, 2826 .features[FEAT_8000_0001_ECX] = 2827 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2828 .features[FEAT_7_0_EBX] = 2829 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2830 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2831 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2832 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2833 CPUID_7_0_EBX_SMAP, 2834 /* Missing: XSAVES (not supported by some Linux versions, 2835 * including v4.1 to v4.12). 2836 * KVM doesn't yet expose any XSAVES state save component, 2837 * and the only one defined in Skylake (processor tracing) 2838 * probably will block migration anyway. 2839 */ 2840 .features[FEAT_XSAVE] = 2841 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2842 CPUID_XSAVE_XGETBV1, 2843 .features[FEAT_6_EAX] = 2844 CPUID_6_EAX_ARAT, 2845 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2846 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2847 MSR_VMX_BASIC_TRUE_CTLS, 2848 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2849 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2850 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2851 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2852 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2853 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2854 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2855 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2856 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2857 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2858 .features[FEAT_VMX_EXIT_CTLS] = 2859 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2860 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2861 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2862 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2863 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2864 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2865 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2866 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2867 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2868 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2869 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2870 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2871 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2872 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2873 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2874 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2875 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2876 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2877 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2878 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2879 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2880 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2881 .features[FEAT_VMX_SECONDARY_CTLS] = 2882 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2883 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2884 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2885 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2886 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2887 
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2888 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2889 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2890 .xlevel = 0x80000008, 2891 .model_id = "Intel Core Processor (Skylake)", 2892 .versions = (X86CPUVersionDefinition[]) { 2893 { .version = 1 }, 2894 { 2895 .version = 2, 2896 .alias = "Skylake-Client-IBRS", 2897 .props = (PropValue[]) { 2898 { "spec-ctrl", "on" }, 2899 { "model-id", 2900 "Intel Core Processor (Skylake, IBRS)" }, 2901 { /* end of list */ } 2902 } 2903 }, 2904 { 2905 .version = 3, 2906 .alias = "Skylake-Client-noTSX-IBRS", 2907 .props = (PropValue[]) { 2908 { "hle", "off" }, 2909 { "rtm", "off" }, 2910 { "model-id", 2911 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { /* end of list */ } 2916 } 2917 }, 2918 { 2919 .name = "Skylake-Server", 2920 .level = 0xd, 2921 .vendor = CPUID_VENDOR_INTEL, 2922 .family = 6, 2923 .model = 85, 2924 .stepping = 4, 2925 .features[FEAT_1_EDX] = 2926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2930 CPUID_DE | CPUID_FP87, 2931 .features[FEAT_1_ECX] = 2932 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2933 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2934 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2935 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2936 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2937 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2938 .features[FEAT_8000_0001_EDX] = 2939 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2940 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2941 .features[FEAT_8000_0001_ECX] = 2942 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2943 .features[FEAT_7_0_EBX] = 2944 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2945 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2946 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2947 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2948 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2949 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2950 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2951 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2952 .features[FEAT_7_0_ECX] = 2953 CPUID_7_0_ECX_PKU, 2954 /* Missing: XSAVES (not supported by some Linux versions, 2955 * including v4.1 to v4.12). 2956 * KVM doesn't yet expose any XSAVES state save component, 2957 * and the only one defined in Skylake (processor tracing) 2958 * probably will block migration anyway. 
2959 */ 2960 .features[FEAT_XSAVE] = 2961 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2962 CPUID_XSAVE_XGETBV1, 2963 .features[FEAT_6_EAX] = 2964 CPUID_6_EAX_ARAT, 2965 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2966 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2967 MSR_VMX_BASIC_TRUE_CTLS, 2968 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2969 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2970 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2971 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2972 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2973 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2974 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2975 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2976 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2977 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2978 .features[FEAT_VMX_EXIT_CTLS] = 2979 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2980 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2981 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2982 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2983 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2984 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2985 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2986 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2987 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2988 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2989 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2990 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2991 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2992 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2993 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2994 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2995 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2996 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2997 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2998 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2999 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3000 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3001 .features[FEAT_VMX_SECONDARY_CTLS] = 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3003 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3004 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3005 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3006 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3007 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3008 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3009 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3010 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3011 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3012 .xlevel = 0x80000008, 3013 .model_id = "Intel Xeon Processor (Skylake)", 3014 .versions = (X86CPUVersionDefinition[]) { 3015 { .version = 1 }, 3016 { 3017 .version = 2, 3018 .alias = "Skylake-Server-IBRS", 3019 .props = (PropValue[]) { 3020 /* clflushopt was not added to Skylake-Server-IBRS */ 3021 /* TODO: add -v3 including clflushopt */ 3022 { "clflushopt", "off" }, 3023 { "spec-ctrl", "on" }, 3024 { "model-id", 3025 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3026 { /* end of list */ } 3027 } 3028 }, 3029 { 3030 .version = 3, 3031 .alias = "Skylake-Server-noTSX-IBRS", 3032 .props = (PropValue[]) { 3033 { "hle", "off" }, 3034 { "rtm", "off" }, 3035 { "model-id", 3036 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .version = 4, 3042 .props = (PropValue[]) { 3043 { "vmx-eptp-switching", "on" }, 3044 { /* end of list */ } 3045 } 3046 }, 3047 { /* end of list */ } 3048 } 3049 }, 3050 { 3051 .name = "Cascadelake-Server", 3052 .level = 0xd, 3053 .vendor = CPUID_VENDOR_INTEL, 3054 .family = 6, 3055 .model = 85, 3056 .stepping = 6, 3057 .features[FEAT_1_EDX] = 3058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3062 CPUID_DE | CPUID_FP87, 3063 .features[FEAT_1_ECX] = 3064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3065 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3066 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3067 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3068 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3069 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3070 .features[FEAT_8000_0001_EDX] = 3071 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3072 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3073 .features[FEAT_8000_0001_ECX] = 3074 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3075 .features[FEAT_7_0_EBX] = 3076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3077 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3078 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3079 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3080 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3081 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3082 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3083 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3084 .features[FEAT_7_0_ECX] = 3085 CPUID_7_0_ECX_PKU | 3086 CPUID_7_0_ECX_AVX512VNNI, 3087 .features[FEAT_7_0_EDX] = 3088 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3089 /* Missing: XSAVES (not supported by some Linux versions, 3090 * including v4.1 to v4.12). 3091 * KVM doesn't yet expose any XSAVES state save component, 3092 * and the only one defined in Skylake (processor tracing) 3093 * probably will block migration anyway. 
3094 */ 3095 .features[FEAT_XSAVE] = 3096 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3097 CPUID_XSAVE_XGETBV1, 3098 .features[FEAT_6_EAX] = 3099 CPUID_6_EAX_ARAT, 3100 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3101 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3102 MSR_VMX_BASIC_TRUE_CTLS, 3103 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3104 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3105 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3106 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3107 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3108 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3109 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3110 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3111 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3112 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3113 .features[FEAT_VMX_EXIT_CTLS] = 3114 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3115 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3116 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3117 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3118 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3119 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3120 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3121 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3122 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3123 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3124 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3125 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3126 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3127 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3128 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3129 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3130 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3131 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3132 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3133 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3134 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3135 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3136 .features[FEAT_VMX_SECONDARY_CTLS] = 3137 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3138 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3139 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3140 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3141 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3142 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3143 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3144 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3145 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3146 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3147 .xlevel = 0x80000008, 3148 .model_id = "Intel Xeon Processor (Cascadelake)", 3149 .versions = (X86CPUVersionDefinition[]) { 3150 { .version = 1 }, 3151 { .version = 2, 3152 .note = "ARCH_CAPABILITIES", 3153 .props = (PropValue[]) { 3154 { "arch-capabilities", "on" }, 3155 { "rdctl-no", "on" }, 3156 { "ibrs-all", "on" }, 3157 { "skip-l1dfl-vmentry", "on" }, 3158 { "mds-no", "on" }, 3159 { /* end of list */ } 3160 }, 3161 }, 3162 { .version = 
3, 3163 .alias = "Cascadelake-Server-noTSX", 3164 .note = "ARCH_CAPABILITIES, no TSX", 3165 .props = (PropValue[]) { 3166 { "hle", "off" }, 3167 { "rtm", "off" }, 3168 { /* end of list */ } 3169 }, 3170 }, 3171 { .version = 4, 3172 .note = "ARCH_CAPABILITIES, no TSX", 3173 .props = (PropValue[]) { 3174 { "vmx-eptp-switching", "on" }, 3175 { /* end of list */ } 3176 }, 3177 }, 3178 { /* end of list */ } 3179 } 3180 }, 3181 { 3182 .name = "Cooperlake", 3183 .level = 0xd, 3184 .vendor = CPUID_VENDOR_INTEL, 3185 .family = 6, 3186 .model = 85, 3187 .stepping = 10, 3188 .features[FEAT_1_EDX] = 3189 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3190 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3191 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3192 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3193 CPUID_DE | CPUID_FP87, 3194 .features[FEAT_1_ECX] = 3195 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3196 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3197 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3198 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3199 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3200 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3201 .features[FEAT_8000_0001_EDX] = 3202 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3203 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3204 .features[FEAT_8000_0001_ECX] = 3205 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3206 .features[FEAT_7_0_EBX] = 3207 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3208 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3209 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3210 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3211 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3212 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3213 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3214 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3215 .features[FEAT_7_0_ECX] = 3216 CPUID_7_0_ECX_PKU | 3217 CPUID_7_0_ECX_AVX512VNNI, 3218 .features[FEAT_7_0_EDX] = 3219 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3220 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3221 .features[FEAT_ARCH_CAPABILITIES] = 3222 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3223 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3224 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3225 .features[FEAT_7_1_EAX] = 3226 CPUID_7_1_EAX_AVX512_BF16, 3227 /* 3228 * Missing: XSAVES (not supported by some Linux versions, 3229 * including v4.1 to v4.12). 3230 * KVM doesn't yet expose any XSAVES state save component, 3231 * and the only one defined in Skylake (processor tracing) 3232 * probably will block migration anyway. 
3233 */ 3234 .features[FEAT_XSAVE] = 3235 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3236 CPUID_XSAVE_XGETBV1, 3237 .features[FEAT_6_EAX] = 3238 CPUID_6_EAX_ARAT, 3239 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3240 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3241 MSR_VMX_BASIC_TRUE_CTLS, 3242 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3243 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3244 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3245 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3246 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3247 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3248 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3249 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3250 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3251 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3252 .features[FEAT_VMX_EXIT_CTLS] = 3253 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3254 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3255 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3256 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3257 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3258 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3259 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3260 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3261 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3262 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3263 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3264 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3265 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3266 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3267 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3268 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3269 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3270 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3271 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3272 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3273 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3274 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3275 .features[FEAT_VMX_SECONDARY_CTLS] = 3276 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3277 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3278 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3279 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3280 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3281 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3282 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3283 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3284 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3285 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3286 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3287 .xlevel = 0x80000008, 3288 .model_id = "Intel Xeon Processor (Cooperlake)", 3289 }, 3290 { 3291 .name = "Icelake-Client", 3292 .level = 0xd, 3293 .vendor = CPUID_VENDOR_INTEL, 3294 .family = 6, 3295 .model = 126, 3296 .stepping = 0, 3297 .features[FEAT_1_EDX] = 3298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
CPUID_CMOV | CPUID_MCA | 3300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3302 CPUID_DE | CPUID_FP87, 3303 .features[FEAT_1_ECX] = 3304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3310 .features[FEAT_8000_0001_EDX] = 3311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3312 CPUID_EXT2_SYSCALL, 3313 .features[FEAT_8000_0001_ECX] = 3314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3315 .features[FEAT_8000_0008_EBX] = 3316 CPUID_8000_0008_EBX_WBNOINVD, 3317 .features[FEAT_7_0_EBX] = 3318 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3319 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3320 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3321 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3322 CPUID_7_0_EBX_SMAP, 3323 .features[FEAT_7_0_ECX] = 3324 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3325 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3326 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3327 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3328 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3329 .features[FEAT_7_0_EDX] = 3330 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3331 /* Missing: XSAVES (not supported by some Linux versions, 3332 * including v4.1 to v4.12). 3333 * KVM doesn't yet expose any XSAVES state save component, 3334 * and the only one defined in Skylake (processor tracing) 3335 * probably will block migration anyway. 
3336 */ 3337 .features[FEAT_XSAVE] = 3338 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3339 CPUID_XSAVE_XGETBV1, 3340 .features[FEAT_6_EAX] = 3341 CPUID_6_EAX_ARAT, 3342 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3343 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3344 MSR_VMX_BASIC_TRUE_CTLS, 3345 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3346 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3347 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3348 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3349 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3350 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3351 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3352 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3353 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3354 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3355 .features[FEAT_VMX_EXIT_CTLS] = 3356 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3357 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3358 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3359 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3360 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3361 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3362 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3363 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3364 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3365 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3366 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3367 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3368 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3369 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3370 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3371 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3372 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3373 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3374 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3375 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3376 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3377 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3378 .features[FEAT_VMX_SECONDARY_CTLS] = 3379 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3380 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3381 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3382 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3383 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3384 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3385 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3386 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3387 .xlevel = 0x80000008, 3388 .model_id = "Intel Core Processor (Icelake)", 3389 .versions = (X86CPUVersionDefinition[]) { 3390 { 3391 .version = 1, 3392 .note = "deprecated" 3393 }, 3394 { 3395 .version = 2, 3396 .note = "no TSX, deprecated", 3397 .alias = "Icelake-Client-noTSX", 3398 .props = (PropValue[]) { 3399 { "hle", "off" }, 3400 { "rtm", "off" }, 3401 { /* end of list */ } 3402 }, 3403 }, 3404 { /* end of list */ } 3405 }, 3406 .deprecation_note = "use Icelake-Server instead" 3407 }, 3408 { 3409 .name = "Icelake-Server", 3410 .level = 0xd, 
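        /*
         * .level is the basic CPUID level this model advertises (0xd is the
         * XSAVE enumeration leaf); the extended 0x8000_xxxx range is sized
         * separately by .xlevel further down in this definition.
         */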
3411 .vendor = CPUID_VENDOR_INTEL, 3412 .family = 6, 3413 .model = 134, 3414 .stepping = 0, 3415 .features[FEAT_1_EDX] = 3416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3420 CPUID_DE | CPUID_FP87, 3421 .features[FEAT_1_ECX] = 3422 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3423 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3425 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3426 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3427 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3428 .features[FEAT_8000_0001_EDX] = 3429 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3430 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3431 .features[FEAT_8000_0001_ECX] = 3432 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3433 .features[FEAT_8000_0008_EBX] = 3434 CPUID_8000_0008_EBX_WBNOINVD, 3435 .features[FEAT_7_0_EBX] = 3436 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3437 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3438 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3439 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3440 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3441 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3442 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3443 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3444 .features[FEAT_7_0_ECX] = 3445 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3446 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3447 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3448 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3449 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3450 .features[FEAT_7_0_EDX] = 3451 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3452 /* Missing: XSAVES (not supported by some Linux versions, 3453 * including v4.1 to v4.12). 3454 * KVM doesn't yet expose any XSAVES state save component, 3455 * and the only one defined in Skylake (processor tracing) 3456 * probably will block migration anyway. 
3457 */ 3458 .features[FEAT_XSAVE] = 3459 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3460 CPUID_XSAVE_XGETBV1, 3461 .features[FEAT_6_EAX] = 3462 CPUID_6_EAX_ARAT, 3463 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3464 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3465 MSR_VMX_BASIC_TRUE_CTLS, 3466 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3467 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3468 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3469 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3470 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3471 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3472 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3473 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3474 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3475 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3476 .features[FEAT_VMX_EXIT_CTLS] = 3477 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3478 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3479 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3480 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3481 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3482 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3483 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3484 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3485 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3486 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3487 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3488 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3489 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3490 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3491 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3492 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3493 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3494 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3495 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3496 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3497 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3498 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3499 .features[FEAT_VMX_SECONDARY_CTLS] = 3500 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3501 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3502 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3503 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3504 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3505 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3506 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3507 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3508 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3509 .xlevel = 0x80000008, 3510 .model_id = "Intel Xeon Processor (Icelake)", 3511 .versions = (X86CPUVersionDefinition[]) { 3512 { .version = 1 }, 3513 { 3514 .version = 2, 3515 .note = "no TSX", 3516 .alias = "Icelake-Server-noTSX", 3517 .props = (PropValue[]) { 3518 { "hle", "off" }, 3519 { "rtm", "off" }, 3520 { /* end of list */ } 3521 }, 3522 }, 3523 { 3524 .version = 3, 3525 .props = (PropValue[]) { 3526 { "arch-capabilities", "on" }, 3527 { "rdctl-no", "on" }, 3528 { "ibrs-all", "on" }, 3529 { 
"skip-l1dfl-vmentry", "on" }, 3530 { "mds-no", "on" }, 3531 { "pschange-mc-no", "on" }, 3532 { "taa-no", "on" }, 3533 { /* end of list */ } 3534 }, 3535 }, 3536 { 3537 .version = 4, 3538 .props = (PropValue[]) { 3539 { "sha-ni", "on" }, 3540 { "avx512ifma", "on" }, 3541 { "rdpid", "on" }, 3542 { "fsrm", "on" }, 3543 { "vmx-rdseed-exit", "on" }, 3544 { "vmx-pml", "on" }, 3545 { "vmx-eptp-switching", "on" }, 3546 { "model", "106" }, 3547 { /* end of list */ } 3548 }, 3549 }, 3550 { /* end of list */ } 3551 } 3552 }, 3553 { 3554 .name = "Denverton", 3555 .level = 21, 3556 .vendor = CPUID_VENDOR_INTEL, 3557 .family = 6, 3558 .model = 95, 3559 .stepping = 1, 3560 .features[FEAT_1_EDX] = 3561 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3562 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3563 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3564 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3565 CPUID_SSE | CPUID_SSE2, 3566 .features[FEAT_1_ECX] = 3567 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3568 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3569 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3570 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3571 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3572 .features[FEAT_8000_0001_EDX] = 3573 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3574 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3575 .features[FEAT_8000_0001_ECX] = 3576 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3577 .features[FEAT_7_0_EBX] = 3578 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3579 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3580 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3581 .features[FEAT_7_0_EDX] = 3582 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3583 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3584 /* 3585 * Missing: XSAVES (not supported by some Linux versions, 3586 * including v4.1 to v4.12). 3587 * KVM doesn't yet expose any XSAVES state save component, 3588 * and the only one defined in Skylake (processor tracing) 3589 * probably will block migration anyway. 
3590 */ 3591 .features[FEAT_XSAVE] = 3592 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3593 .features[FEAT_6_EAX] = 3594 CPUID_6_EAX_ARAT, 3595 .features[FEAT_ARCH_CAPABILITIES] = 3596 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3597 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3598 MSR_VMX_BASIC_TRUE_CTLS, 3599 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3600 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3601 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3602 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3603 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3604 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3605 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3606 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3607 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3608 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3609 .features[FEAT_VMX_EXIT_CTLS] = 3610 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3611 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3612 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3613 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3614 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3615 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3616 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3617 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3618 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3619 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3620 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3621 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3622 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3623 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3624 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3625 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3626 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3627 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3628 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3629 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3630 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3631 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3632 .features[FEAT_VMX_SECONDARY_CTLS] = 3633 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3634 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3635 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3636 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3637 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3638 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3639 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3640 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3641 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3642 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3643 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3644 .xlevel = 0x80000008, 3645 .model_id = "Intel Atom Processor (Denverton)", 3646 .versions = (X86CPUVersionDefinition[]) { 3647 { .version = 1 }, 3648 { 3649 .version = 2, 3650 .note = "no MPX, no MONITOR", 3651 .props = (PropValue[]) { 3652 { "monitor", "off" }, 3653 { "mpx", "off" }, 3654 { /* end of list */ }, 3655 }, 3656 }, 3657 { /* end of list */ }, 3658 }, 3659 
}, 3660 { 3661 .name = "Snowridge", 3662 .level = 27, 3663 .vendor = CPUID_VENDOR_INTEL, 3664 .family = 6, 3665 .model = 134, 3666 .stepping = 1, 3667 .features[FEAT_1_EDX] = 3668 /* missing: CPUID_PN CPUID_IA64 */ 3669 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3670 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3671 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3672 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3673 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3674 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3675 CPUID_MMX | 3676 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3677 .features[FEAT_1_ECX] = 3678 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3679 CPUID_EXT_SSSE3 | 3680 CPUID_EXT_CX16 | 3681 CPUID_EXT_SSE41 | 3682 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3683 CPUID_EXT_POPCNT | 3684 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3685 CPUID_EXT_RDRAND, 3686 .features[FEAT_8000_0001_EDX] = 3687 CPUID_EXT2_SYSCALL | 3688 CPUID_EXT2_NX | 3689 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3690 CPUID_EXT2_LM, 3691 .features[FEAT_8000_0001_ECX] = 3692 CPUID_EXT3_LAHF_LM | 3693 CPUID_EXT3_3DNOWPREFETCH, 3694 .features[FEAT_7_0_EBX] = 3695 CPUID_7_0_EBX_FSGSBASE | 3696 CPUID_7_0_EBX_SMEP | 3697 CPUID_7_0_EBX_ERMS | 3698 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3699 CPUID_7_0_EBX_RDSEED | 3700 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3701 CPUID_7_0_EBX_CLWB | 3702 CPUID_7_0_EBX_SHA_NI, 3703 .features[FEAT_7_0_ECX] = 3704 CPUID_7_0_ECX_UMIP | 3705 /* missing bit 5 */ 3706 CPUID_7_0_ECX_GFNI | 3707 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3708 CPUID_7_0_ECX_MOVDIR64B, 3709 .features[FEAT_7_0_EDX] = 3710 CPUID_7_0_EDX_SPEC_CTRL | 3711 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3712 CPUID_7_0_EDX_CORE_CAPABILITY, 3713 .features[FEAT_CORE_CAPABILITY] = 3714 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3715 /* 3716 * Missing: XSAVES (not supported by some Linux versions, 3717 * including v4.1 to v4.12). 3718 * KVM doesn't yet expose any XSAVES state save component, 3719 * and the only one defined in Skylake (processor tracing) 3720 * probably will block migration anyway. 
3721 */ 3722 .features[FEAT_XSAVE] = 3723 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3724 CPUID_XSAVE_XGETBV1, 3725 .features[FEAT_6_EAX] = 3726 CPUID_6_EAX_ARAT, 3727 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3728 MSR_VMX_BASIC_TRUE_CTLS, 3729 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3730 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3731 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3732 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3733 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3734 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3735 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3736 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3737 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3738 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3739 .features[FEAT_VMX_EXIT_CTLS] = 3740 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3741 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3742 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3743 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3744 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3745 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3746 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3747 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3748 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3749 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3750 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3751 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3752 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3753 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3754 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3755 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3756 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3757 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3758 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3759 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3760 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3761 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3762 .features[FEAT_VMX_SECONDARY_CTLS] = 3763 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3764 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3765 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3766 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3767 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3768 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3769 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3770 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3771 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3772 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3773 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3774 .xlevel = 0x80000008, 3775 .model_id = "Intel Atom Processor (SnowRidge)", 3776 .versions = (X86CPUVersionDefinition[]) { 3777 { .version = 1 }, 3778 { 3779 .version = 2, 3780 .props = (PropValue[]) { 3781 { "mpx", "off" }, 3782 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3783 { /* end of list */ }, 3784 }, 3785 }, 3786 { /* end of list */ }, 3787 }, 3788 }, 3789 { 3790 .name = "KnightsMill", 3791 .level = 0xd, 3792 .vendor = CPUID_VENDOR_INTEL, 
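        /*
         * The AVX512PF/AVX512ER and AVX512_4VNNIW/AVX512_4FMAPS bits set
         * below are AVX-512 subsets specific to the Xeon Phi line; they are
         * not set in the Xeon server models defined above.
         */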
3793 .family = 6, 3794 .model = 133, 3795 .stepping = 0, 3796 .features[FEAT_1_EDX] = 3797 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3798 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3799 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3800 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3801 CPUID_PSE | CPUID_DE | CPUID_FP87, 3802 .features[FEAT_1_ECX] = 3803 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3804 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3805 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3806 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3807 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3808 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3809 .features[FEAT_8000_0001_EDX] = 3810 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3811 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3812 .features[FEAT_8000_0001_ECX] = 3813 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3814 .features[FEAT_7_0_EBX] = 3815 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3816 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3817 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3818 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3819 CPUID_7_0_EBX_AVX512ER, 3820 .features[FEAT_7_0_ECX] = 3821 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3822 .features[FEAT_7_0_EDX] = 3823 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3824 .features[FEAT_XSAVE] = 3825 CPUID_XSAVE_XSAVEOPT, 3826 .features[FEAT_6_EAX] = 3827 CPUID_6_EAX_ARAT, 3828 .xlevel = 0x80000008, 3829 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3830 }, 3831 { 3832 .name = "Opteron_G1", 3833 .level = 5, 3834 .vendor = CPUID_VENDOR_AMD, 3835 .family = 15, 3836 .model = 6, 3837 .stepping = 1, 3838 .features[FEAT_1_EDX] = 3839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3843 CPUID_DE | CPUID_FP87, 3844 .features[FEAT_1_ECX] = 3845 CPUID_EXT_SSE3, 3846 .features[FEAT_8000_0001_EDX] = 3847 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3848 .xlevel = 0x80000008, 3849 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3850 }, 3851 { 3852 .name = "Opteron_G2", 3853 .level = 5, 3854 .vendor = CPUID_VENDOR_AMD, 3855 .family = 15, 3856 .model = 6, 3857 .stepping = 1, 3858 .features[FEAT_1_EDX] = 3859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3863 CPUID_DE | CPUID_FP87, 3864 .features[FEAT_1_ECX] = 3865 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3866 .features[FEAT_8000_0001_EDX] = 3867 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3868 .features[FEAT_8000_0001_ECX] = 3869 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3870 .xlevel = 0x80000008, 3871 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3872 }, 3873 { 3874 .name = "Opteron_G3", 3875 .level = 5, 3876 .vendor = CPUID_VENDOR_AMD, 3877 .family = 16, 3878 .model = 2, 3879 .stepping = 3, 3880 .features[FEAT_1_EDX] = 3881 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3882 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3883 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3884 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3885 CPUID_DE | CPUID_FP87, 3886 .features[FEAT_1_ECX] = 3887 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3888 CPUID_EXT_SSE3, 3889 .features[FEAT_8000_0001_EDX] = 3890 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3891 CPUID_EXT2_RDTSCP, 3892 .features[FEAT_8000_0001_ECX] = 3893 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3894 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3895 .xlevel = 0x80000008, 3896 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3897 }, 3898 { 3899 .name = "Opteron_G4", 3900 .level = 0xd, 3901 .vendor = CPUID_VENDOR_AMD, 3902 .family = 21, 3903 .model = 1, 3904 .stepping = 2, 3905 .features[FEAT_1_EDX] = 3906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3910 CPUID_DE | CPUID_FP87, 3911 .features[FEAT_1_ECX] = 3912 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3913 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3914 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3915 CPUID_EXT_SSE3, 3916 .features[FEAT_8000_0001_EDX] = 3917 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3918 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3919 .features[FEAT_8000_0001_ECX] = 3920 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3921 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3922 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3923 CPUID_EXT3_LAHF_LM, 3924 .features[FEAT_SVM] = 3925 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3926 /* no xsaveopt! */ 3927 .xlevel = 0x8000001A, 3928 .model_id = "AMD Opteron 62xx class CPU", 3929 }, 3930 { 3931 .name = "Opteron_G5", 3932 .level = 0xd, 3933 .vendor = CPUID_VENDOR_AMD, 3934 .family = 21, 3935 .model = 2, 3936 .stepping = 0, 3937 .features[FEAT_1_EDX] = 3938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3942 CPUID_DE | CPUID_FP87, 3943 .features[FEAT_1_ECX] = 3944 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3945 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3946 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3947 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3948 .features[FEAT_8000_0001_EDX] = 3949 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3950 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3951 .features[FEAT_8000_0001_ECX] = 3952 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3953 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3954 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3955 CPUID_EXT3_LAHF_LM, 3956 .features[FEAT_SVM] = 3957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3958 /* no xsaveopt! 
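             * FEAT_XSAVE is left entirely unset for this model, as it is for
             * Opteron_G4 above.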
*/ 3959 .xlevel = 0x8000001A, 3960 .model_id = "AMD Opteron 63xx class CPU", 3961 }, 3962 { 3963 .name = "EPYC", 3964 .level = 0xd, 3965 .vendor = CPUID_VENDOR_AMD, 3966 .family = 23, 3967 .model = 1, 3968 .stepping = 2, 3969 .features[FEAT_1_EDX] = 3970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3974 CPUID_VME | CPUID_FP87, 3975 .features[FEAT_1_ECX] = 3976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3977 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3980 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3981 .features[FEAT_8000_0001_EDX] = 3982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3984 CPUID_EXT2_SYSCALL, 3985 .features[FEAT_8000_0001_ECX] = 3986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3989 CPUID_EXT3_TOPOEXT, 3990 .features[FEAT_7_0_EBX] = 3991 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3992 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3993 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3994 CPUID_7_0_EBX_SHA_NI, 3995 .features[FEAT_XSAVE] = 3996 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3997 CPUID_XSAVE_XGETBV1, 3998 .features[FEAT_6_EAX] = 3999 CPUID_6_EAX_ARAT, 4000 .features[FEAT_SVM] = 4001 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4002 .xlevel = 0x8000001E, 4003 .model_id = "AMD EPYC Processor", 4004 .cache_info = &epyc_cache_info, 4005 .versions = (X86CPUVersionDefinition[]) { 4006 { .version = 1 }, 4007 { 4008 .version = 2, 4009 .alias = "EPYC-IBPB", 4010 .props = (PropValue[]) { 4011 { "ibpb", "on" }, 4012 { "model-id", 4013 "AMD EPYC Processor (with IBPB)" }, 4014 { /* end of list */ } 4015 } 4016 }, 4017 { 4018 .version = 3, 4019 .props = (PropValue[]) { 4020 { "ibpb", "on" }, 4021 { "perfctr-core", "on" }, 4022 { "clzero", "on" }, 4023 { "xsaveerptr", "on" }, 4024 { "xsaves", "on" }, 4025 { "model-id", 4026 "AMD EPYC Processor" }, 4027 { /* end of list */ } 4028 } 4029 }, 4030 { /* end of list */ } 4031 } 4032 }, 4033 { 4034 .name = "Dhyana", 4035 .level = 0xd, 4036 .vendor = CPUID_VENDOR_HYGON, 4037 .family = 24, 4038 .model = 0, 4039 .stepping = 1, 4040 .features[FEAT_1_EDX] = 4041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4045 CPUID_VME | CPUID_FP87, 4046 .features[FEAT_1_ECX] = 4047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4052 .features[FEAT_8000_0001_EDX] = 4053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4055 CPUID_EXT2_SYSCALL, 4056 .features[FEAT_8000_0001_ECX] = 4057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4058 CPUID_EXT3_MISALIGNSSE | 
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4060 CPUID_EXT3_TOPOEXT, 4061 .features[FEAT_8000_0008_EBX] = 4062 CPUID_8000_0008_EBX_IBPB, 4063 .features[FEAT_7_0_EBX] = 4064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4067 /* 4068 * Missing: XSAVES (not supported by some Linux versions, 4069 * including v4.1 to v4.12). 4070 * KVM doesn't yet expose any XSAVES state save component. 4071 */ 4072 .features[FEAT_XSAVE] = 4073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4074 CPUID_XSAVE_XGETBV1, 4075 .features[FEAT_6_EAX] = 4076 CPUID_6_EAX_ARAT, 4077 .features[FEAT_SVM] = 4078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4079 .xlevel = 0x8000001E, 4080 .model_id = "Hygon Dhyana Processor", 4081 .cache_info = &epyc_cache_info, 4082 }, 4083 { 4084 .name = "EPYC-Rome", 4085 .level = 0xd, 4086 .vendor = CPUID_VENDOR_AMD, 4087 .family = 23, 4088 .model = 49, 4089 .stepping = 0, 4090 .features[FEAT_1_EDX] = 4091 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4092 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4093 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4094 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4095 CPUID_VME | CPUID_FP87, 4096 .features[FEAT_1_ECX] = 4097 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4098 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4099 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4100 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4101 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4102 .features[FEAT_8000_0001_EDX] = 4103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4104 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4105 CPUID_EXT2_SYSCALL, 4106 .features[FEAT_8000_0001_ECX] = 4107 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4108 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4109 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4110 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4111 .features[FEAT_8000_0008_EBX] = 4112 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4113 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4114 CPUID_8000_0008_EBX_STIBP, 4115 .features[FEAT_7_0_EBX] = 4116 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4117 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4118 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4119 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4120 .features[FEAT_7_0_ECX] = 4121 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4122 .features[FEAT_XSAVE] = 4123 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4124 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4125 .features[FEAT_6_EAX] = 4126 CPUID_6_EAX_ARAT, 4127 .features[FEAT_SVM] = 4128 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4129 .xlevel = 0x8000001E, 4130 .model_id = "AMD EPYC-Rome Processor", 4131 .cache_info = &epyc_rome_cache_info, 4132 }, 4133 }; 4134 4135 /* KVM-specific features that are automatically added/removed 4136 * from all CPU models when KVM is enabled. 
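 * An individual entry can be adjusted before the CPUs are created by calling
 * x86_cpu_change_kvm_default(), defined further below; a purely illustrative
 * call would be x86_cpu_change_kvm_default("x2apic", "off").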
4137 */ 4138 static PropValue kvm_default_props[] = { 4139 { "kvmclock", "on" }, 4140 { "kvm-nopiodelay", "on" }, 4141 { "kvm-asyncpf", "on" }, 4142 { "kvm-steal-time", "on" }, 4143 { "kvm-pv-eoi", "on" }, 4144 { "kvmclock-stable-bit", "on" }, 4145 { "x2apic", "on" }, 4146 { "kvm-msi-ext-dest-id", "off" }, 4147 { "acpi", "off" }, 4148 { "monitor", "off" }, 4149 { "svm", "off" }, 4150 { NULL, NULL }, 4151 }; 4152 4153 /* TCG-specific defaults that override all CPU models when using TCG 4154 */ 4155 static PropValue tcg_default_props[] = { 4156 { "vme", "off" }, 4157 { NULL, NULL }, 4158 }; 4159 4160 4161 /* 4162 * We resolve CPU model aliases using -v1 when using "-machine 4163 * none", but this is just for compatibility while libvirt isn't 4164 * adapted to resolve CPU model versions before creating VMs. 4165 * See "Runnability guarantee of CPU models" at 4166 * docs/system/deprecated.rst. 4167 */ 4168 X86CPUVersion default_cpu_version = 1; 4169 4170 void x86_cpu_set_default_version(X86CPUVersion version) 4171 { 4172 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4173 assert(version != CPU_VERSION_AUTO); 4174 default_cpu_version = version; 4175 } 4176 4177 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4178 { 4179 int v = 0; 4180 const X86CPUVersionDefinition *vdef = 4181 x86_cpu_def_get_versions(model->cpudef); 4182 while (vdef->version) { 4183 v = vdef->version; 4184 vdef++; 4185 } 4186 return v; 4187 } 4188 4189 /* Return the actual version being used for a specific CPU model */ 4190 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4191 { 4192 X86CPUVersion v = model->version; 4193 if (v == CPU_VERSION_AUTO) { 4194 v = default_cpu_version; 4195 } 4196 if (v == CPU_VERSION_LATEST) { 4197 return x86_cpu_model_last_version(model); 4198 } 4199 return v; 4200 } 4201 4202 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4203 { 4204 PropValue *pv; 4205 for (pv = kvm_default_props; pv->prop; pv++) { 4206 if (!strcmp(pv->prop, prop)) { 4207 pv->value = value; 4208 break; 4209 } 4210 } 4211 4212 /* It is valid to call this function only for properties that 4213 * are already present in the kvm_default_props table. 4214 */ 4215 assert(pv->prop); 4216 } 4217 4218 static bool lmce_supported(void) 4219 { 4220 uint64_t mce_cap = 0; 4221 4222 #ifdef CONFIG_KVM 4223 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4224 return false; 4225 } 4226 #endif 4227 4228 return !!(mce_cap & MCG_LMCE_P); 4229 } 4230 4231 #define CPUID_MODEL_ID_SZ 48 4232 4233 /** 4234 * cpu_x86_fill_model_id: 4235 * Get CPUID model ID string from host CPU. 4236 * 4237 * @str should have at least CPUID_MODEL_ID_SZ bytes 4238 * 4239 * The function does NOT add a null terminator to the string 4240 * automatically. 
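 * Callers that need a NUL-terminated C string should therefore pass a
 * zero-initialized buffer of CPUID_MODEL_ID_SZ + 1 bytes, as
 * max_x86_cpu_initfn() below does.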
4241 */ 4242 static int cpu_x86_fill_model_id(char *str) 4243 { 4244 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4245 int i; 4246 4247 for (i = 0; i < 3; i++) { 4248 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4249 memcpy(str + i * 16 + 0, &eax, 4); 4250 memcpy(str + i * 16 + 4, &ebx, 4); 4251 memcpy(str + i * 16 + 8, &ecx, 4); 4252 memcpy(str + i * 16 + 12, &edx, 4); 4253 } 4254 return 0; 4255 } 4256 4257 static Property max_x86_cpu_properties[] = { 4258 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4259 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4260 DEFINE_PROP_END_OF_LIST() 4261 }; 4262 4263 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4264 { 4265 DeviceClass *dc = DEVICE_CLASS(oc); 4266 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4267 4268 xcc->ordering = 9; 4269 4270 xcc->model_description = 4271 "Enables all features supported by the accelerator in the current host"; 4272 4273 device_class_set_props(dc, max_x86_cpu_properties); 4274 } 4275 4276 static void max_x86_cpu_initfn(Object *obj) 4277 { 4278 X86CPU *cpu = X86_CPU(obj); 4279 CPUX86State *env = &cpu->env; 4280 KVMState *s = kvm_state; 4281 4282 /* We can't fill the features array here because we don't know yet if 4283 * "migratable" is true or false. 4284 */ 4285 cpu->max_features = true; 4286 4287 if (accel_uses_host_cpuid()) { 4288 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4289 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4290 int family, model, stepping; 4291 4292 host_vendor_fms(vendor, &family, &model, &stepping); 4293 cpu_x86_fill_model_id(model_id); 4294 4295 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 4296 object_property_set_int(OBJECT(cpu), "family", family, &error_abort); 4297 object_property_set_int(OBJECT(cpu), "model", model, &error_abort); 4298 object_property_set_int(OBJECT(cpu), "stepping", stepping, 4299 &error_abort); 4300 object_property_set_str(OBJECT(cpu), "model-id", model_id, 4301 &error_abort); 4302 4303 if (kvm_enabled()) { 4304 env->cpuid_min_level = 4305 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4306 env->cpuid_min_xlevel = 4307 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4308 env->cpuid_min_xlevel2 = 4309 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4310 } else { 4311 env->cpuid_min_level = 4312 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4313 env->cpuid_min_xlevel = 4314 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4315 env->cpuid_min_xlevel2 = 4316 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4317 } 4318 4319 if (lmce_supported()) { 4320 object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); 4321 } 4322 object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort); 4323 } else { 4324 object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, 4325 &error_abort); 4326 object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); 4327 object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); 4328 object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); 4329 object_property_set_str(OBJECT(cpu), "model-id", 4330 "QEMU TCG CPU version " QEMU_HW_VERSION, 4331 &error_abort); 4332 } 4333 4334 object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); 4335 } 4336 4337 static const TypeInfo max_x86_cpu_type_info = { 4338 .name = X86_CPU_TYPE_NAME("max"), 4339 .parent = TYPE_X86_CPU, 4340 .instance_init = max_x86_cpu_initfn, 4341 .class_init = max_x86_cpu_class_init, 4342 }; 4343 4344 #if defined(CONFIG_KVM) || 
defined(CONFIG_HVF) 4345 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4346 { 4347 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4348 4349 xcc->host_cpuid_required = true; 4350 xcc->ordering = 8; 4351 4352 #if defined(CONFIG_KVM) 4353 xcc->model_description = 4354 "KVM processor with all supported host features "; 4355 #elif defined(CONFIG_HVF) 4356 xcc->model_description = 4357 "HVF processor with all supported host features "; 4358 #endif 4359 } 4360 4361 static const TypeInfo host_x86_cpu_type_info = { 4362 .name = X86_CPU_TYPE_NAME("host"), 4363 .parent = X86_CPU_TYPE_NAME("max"), 4364 .class_init = host_x86_cpu_class_init, 4365 }; 4366 4367 #endif 4368 4369 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4370 { 4371 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4372 4373 switch (f->type) { 4374 case CPUID_FEATURE_WORD: 4375 { 4376 const char *reg = get_register_name_32(f->cpuid.reg); 4377 assert(reg); 4378 return g_strdup_printf("CPUID.%02XH:%s", 4379 f->cpuid.eax, reg); 4380 } 4381 case MSR_FEATURE_WORD: 4382 return g_strdup_printf("MSR(%02XH)", 4383 f->msr.index); 4384 } 4385 4386 return NULL; 4387 } 4388 4389 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4390 { 4391 FeatureWord w; 4392 4393 for (w = 0; w < FEATURE_WORDS; w++) { 4394 if (cpu->filtered_features[w]) { 4395 return true; 4396 } 4397 } 4398 4399 return false; 4400 } 4401 4402 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4403 const char *verbose_prefix) 4404 { 4405 CPUX86State *env = &cpu->env; 4406 FeatureWordInfo *f = &feature_word_info[w]; 4407 int i; 4408 4409 if (!cpu->force_features) { 4410 env->features[w] &= ~mask; 4411 } 4412 cpu->filtered_features[w] |= mask; 4413 4414 if (!verbose_prefix) { 4415 return; 4416 } 4417 4418 for (i = 0; i < 64; ++i) { 4419 if ((1ULL << i) & mask) { 4420 g_autofree char *feat_word_str = feature_word_description(f, i); 4421 warn_report("%s: %s%s%s [bit %d]", 4422 verbose_prefix, 4423 feat_word_str, 4424 f->feat_names[i] ? "." : "", 4425 f->feat_names[i] ? f->feat_names[i] : "", i); 4426 } 4427 } 4428 } 4429 4430 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4431 const char *name, void *opaque, 4432 Error **errp) 4433 { 4434 X86CPU *cpu = X86_CPU(obj); 4435 CPUX86State *env = &cpu->env; 4436 int64_t value; 4437 4438 value = (env->cpuid_version >> 8) & 0xf; 4439 if (value == 0xf) { 4440 value += (env->cpuid_version >> 20) & 0xff; 4441 } 4442 visit_type_int(v, name, &value, errp); 4443 } 4444 4445 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4446 const char *name, void *opaque, 4447 Error **errp) 4448 { 4449 X86CPU *cpu = X86_CPU(obj); 4450 CPUX86State *env = &cpu->env; 4451 const int64_t min = 0; 4452 const int64_t max = 0xff + 0xf; 4453 int64_t value; 4454 4455 if (!visit_type_int(v, name, &value, errp)) { 4456 return; 4457 } 4458 if (value < min || value > max) { 4459 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4460 name ? 
name : "null", value, min, max); 4461 return; 4462 } 4463 4464 env->cpuid_version &= ~0xff00f00; 4465 if (value > 0x0f) { 4466 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4467 } else { 4468 env->cpuid_version |= value << 8; 4469 } 4470 } 4471 4472 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4473 const char *name, void *opaque, 4474 Error **errp) 4475 { 4476 X86CPU *cpu = X86_CPU(obj); 4477 CPUX86State *env = &cpu->env; 4478 int64_t value; 4479 4480 value = (env->cpuid_version >> 4) & 0xf; 4481 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4482 visit_type_int(v, name, &value, errp); 4483 } 4484 4485 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4486 const char *name, void *opaque, 4487 Error **errp) 4488 { 4489 X86CPU *cpu = X86_CPU(obj); 4490 CPUX86State *env = &cpu->env; 4491 const int64_t min = 0; 4492 const int64_t max = 0xff; 4493 int64_t value; 4494 4495 if (!visit_type_int(v, name, &value, errp)) { 4496 return; 4497 } 4498 if (value < min || value > max) { 4499 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4500 name ? name : "null", value, min, max); 4501 return; 4502 } 4503 4504 env->cpuid_version &= ~0xf00f0; 4505 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4506 } 4507 4508 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4509 const char *name, void *opaque, 4510 Error **errp) 4511 { 4512 X86CPU *cpu = X86_CPU(obj); 4513 CPUX86State *env = &cpu->env; 4514 int64_t value; 4515 4516 value = env->cpuid_version & 0xf; 4517 visit_type_int(v, name, &value, errp); 4518 } 4519 4520 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4521 const char *name, void *opaque, 4522 Error **errp) 4523 { 4524 X86CPU *cpu = X86_CPU(obj); 4525 CPUX86State *env = &cpu->env; 4526 const int64_t min = 0; 4527 const int64_t max = 0xf; 4528 int64_t value; 4529 4530 if (!visit_type_int(v, name, &value, errp)) { 4531 return; 4532 } 4533 if (value < min || value > max) { 4534 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4535 name ? 
name : "null", value, min, max); 4536 return; 4537 } 4538 4539 env->cpuid_version &= ~0xf; 4540 env->cpuid_version |= value & 0xf; 4541 } 4542 4543 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4544 { 4545 X86CPU *cpu = X86_CPU(obj); 4546 CPUX86State *env = &cpu->env; 4547 char *value; 4548 4549 value = g_malloc(CPUID_VENDOR_SZ + 1); 4550 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4551 env->cpuid_vendor3); 4552 return value; 4553 } 4554 4555 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4556 Error **errp) 4557 { 4558 X86CPU *cpu = X86_CPU(obj); 4559 CPUX86State *env = &cpu->env; 4560 int i; 4561 4562 if (strlen(value) != CPUID_VENDOR_SZ) { 4563 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4564 return; 4565 } 4566 4567 env->cpuid_vendor1 = 0; 4568 env->cpuid_vendor2 = 0; 4569 env->cpuid_vendor3 = 0; 4570 for (i = 0; i < 4; i++) { 4571 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4572 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4573 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4574 } 4575 } 4576 4577 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4578 { 4579 X86CPU *cpu = X86_CPU(obj); 4580 CPUX86State *env = &cpu->env; 4581 char *value; 4582 int i; 4583 4584 value = g_malloc(48 + 1); 4585 for (i = 0; i < 48; i++) { 4586 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4587 } 4588 value[48] = '\0'; 4589 return value; 4590 } 4591 4592 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4593 Error **errp) 4594 { 4595 X86CPU *cpu = X86_CPU(obj); 4596 CPUX86State *env = &cpu->env; 4597 int c, len, i; 4598 4599 if (model_id == NULL) { 4600 model_id = ""; 4601 } 4602 len = strlen(model_id); 4603 memset(env->cpuid_model, 0, 48); 4604 for (i = 0; i < 48; i++) { 4605 if (i >= len) { 4606 c = '\0'; 4607 } else { 4608 c = (uint8_t)model_id[i]; 4609 } 4610 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4611 } 4612 } 4613 4614 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4615 void *opaque, Error **errp) 4616 { 4617 X86CPU *cpu = X86_CPU(obj); 4618 int64_t value; 4619 4620 value = cpu->env.tsc_khz * 1000; 4621 visit_type_int(v, name, &value, errp); 4622 } 4623 4624 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4625 void *opaque, Error **errp) 4626 { 4627 X86CPU *cpu = X86_CPU(obj); 4628 const int64_t min = 0; 4629 const int64_t max = INT64_MAX; 4630 int64_t value; 4631 4632 if (!visit_type_int(v, name, &value, errp)) { 4633 return; 4634 } 4635 if (value < min || value > max) { 4636 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4637 name ? name : "null", value, min, max); 4638 return; 4639 } 4640 4641 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4642 } 4643 4644 /* Generic getter for "feature-words" and "filtered-features" properties */ 4645 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4646 const char *name, void *opaque, 4647 Error **errp) 4648 { 4649 uint64_t *array = (uint64_t *)opaque; 4650 FeatureWord w; 4651 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4652 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4653 X86CPUFeatureWordInfoList *list = NULL; 4654 4655 for (w = 0; w < FEATURE_WORDS; w++) { 4656 FeatureWordInfo *wi = &feature_word_info[w]; 4657 /* 4658 * We didn't have MSR features when "feature-words" was 4659 * introduced. Therefore skipped other type entries. 
4660 */ 4661 if (wi->type != CPUID_FEATURE_WORD) { 4662 continue; 4663 } 4664 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4665 qwi->cpuid_input_eax = wi->cpuid.eax; 4666 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4667 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4668 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4669 qwi->features = array[w]; 4670 4671 /* List will be in reverse order, but order shouldn't matter */ 4672 list_entries[w].next = list; 4673 list_entries[w].value = &word_infos[w]; 4674 list = &list_entries[w]; 4675 } 4676 4677 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4678 } 4679 4680 /* Convert all '_' in a feature string option name to '-', to make feature 4681 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4682 */ 4683 static inline void feat2prop(char *s) 4684 { 4685 while ((s = strchr(s, '_'))) { 4686 *s = '-'; 4687 } 4688 } 4689 4690 /* Return the feature property name for a feature flag bit */ 4691 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4692 { 4693 const char *name; 4694 /* XSAVE components are automatically enabled by other features, 4695 * so return the original feature name instead 4696 */ 4697 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4698 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4699 4700 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4701 x86_ext_save_areas[comp].bits) { 4702 w = x86_ext_save_areas[comp].feature; 4703 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4704 } 4705 } 4706 4707 assert(bitnr < 64); 4708 assert(w < FEATURE_WORDS); 4709 name = feature_word_info[w].feat_names[bitnr]; 4710 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4711 return name; 4712 } 4713 4714 /* Compatibility hack to maintain the legacy +-feat semantics, 4715 * where +-feat overwrites any feature set by 4716 * feat=on|feat even if the latter is parsed after +-feat 4717 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4718 */ 4719 static GList *plus_features, *minus_features; 4720 4721 static gint compare_string(gconstpointer a, gconstpointer b) 4722 { 4723 return g_strcmp0(a, b); 4724 } 4725 4726 /* Parse "+feature,-feature,feature=foo" CPU feature string 4727 */ 4728 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4729 Error **errp) 4730 { 4731 char *featurestr; /* Single 'key=value' string being parsed */ 4732 static bool cpu_globals_initialized; 4733 bool ambiguous = false; 4734 4735 if (cpu_globals_initialized) { 4736 return; 4737 } 4738 cpu_globals_initialized = true; 4739 4740 if (!features) { 4741 return; 4742 } 4743 4744 for (featurestr = strtok(features, ","); 4745 featurestr; 4746 featurestr = strtok(NULL, ",")) { 4747 const char *name; 4748 const char *val = NULL; 4749 char *eq = NULL; 4750 char num[32]; 4751 GlobalProperty *prop; 4752 4753 /* Compatibility syntax: */ 4754 if (featurestr[0] == '+') { 4755 plus_features = g_list_append(plus_features, 4756 g_strdup(featurestr + 1)); 4757 continue; 4758 } else if (featurestr[0] == '-') { 4759 minus_features = g_list_append(minus_features, 4760 g_strdup(featurestr + 1)); 4761 continue; 4762 } 4763 4764 eq = strchr(featurestr, '='); 4765 if (eq) { 4766 *eq++ = 0; 4767 val = eq; 4768 } else { 4769 val = "on"; 4770 } 4771 4772 feat2prop(featurestr); 4773 name = featurestr; 4774 4775 if (g_list_find_custom(plus_features, name, compare_string)) { 4776 warn_report("Ambiguous CPU model string. 
" 4777 "Don't mix both \"+%s\" and \"%s=%s\"", 4778 name, name, val); 4779 ambiguous = true; 4780 } 4781 if (g_list_find_custom(minus_features, name, compare_string)) { 4782 warn_report("Ambiguous CPU model string. " 4783 "Don't mix both \"-%s\" and \"%s=%s\"", 4784 name, name, val); 4785 ambiguous = true; 4786 } 4787 4788 /* Special case: */ 4789 if (!strcmp(name, "tsc-freq")) { 4790 int ret; 4791 uint64_t tsc_freq; 4792 4793 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4794 if (ret < 0 || tsc_freq > INT64_MAX) { 4795 error_setg(errp, "bad numerical value %s", val); 4796 return; 4797 } 4798 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4799 val = num; 4800 name = "tsc-frequency"; 4801 } 4802 4803 prop = g_new0(typeof(*prop), 1); 4804 prop->driver = typename; 4805 prop->property = g_strdup(name); 4806 prop->value = g_strdup(val); 4807 qdev_prop_register_global(prop); 4808 } 4809 4810 if (ambiguous) { 4811 warn_report("Compatibility of ambiguous CPU model " 4812 "strings won't be kept on future QEMU versions"); 4813 } 4814 } 4815 4816 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4817 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4818 4819 /* Build a list with the name of all features on a feature word array */ 4820 static void x86_cpu_list_feature_names(FeatureWordArray features, 4821 strList **feat_names) 4822 { 4823 FeatureWord w; 4824 strList **next = feat_names; 4825 4826 for (w = 0; w < FEATURE_WORDS; w++) { 4827 uint64_t filtered = features[w]; 4828 int i; 4829 for (i = 0; i < 64; i++) { 4830 if (filtered & (1ULL << i)) { 4831 strList *new = g_new0(strList, 1); 4832 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4833 *next = new; 4834 next = &new->next; 4835 } 4836 } 4837 } 4838 } 4839 4840 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4841 const char *name, void *opaque, 4842 Error **errp) 4843 { 4844 X86CPU *xc = X86_CPU(obj); 4845 strList *result = NULL; 4846 4847 x86_cpu_list_feature_names(xc->filtered_features, &result); 4848 visit_type_strList(v, "unavailable-features", &result, errp); 4849 } 4850 4851 /* Check for missing features that may prevent the CPU class from 4852 * running using the current machine and accelerator. 4853 */ 4854 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4855 strList **missing_feats) 4856 { 4857 X86CPU *xc; 4858 Error *err = NULL; 4859 strList **next = missing_feats; 4860 4861 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4862 strList *new = g_new0(strList, 1); 4863 new->value = g_strdup("kvm"); 4864 *missing_feats = new; 4865 return; 4866 } 4867 4868 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4869 4870 x86_cpu_expand_features(xc, &err); 4871 if (err) { 4872 /* Errors at x86_cpu_expand_features should never happen, 4873 * but in case it does, just report the model as not 4874 * runnable at all using the "type" property. 
4875 */ 4876 strList *new = g_new0(strList, 1); 4877 new->value = g_strdup("type"); 4878 *next = new; 4879 next = &new->next; 4880 error_free(err); 4881 } 4882 4883 x86_cpu_filter_features(xc, false); 4884 4885 x86_cpu_list_feature_names(xc->filtered_features, next); 4886 4887 object_unref(OBJECT(xc)); 4888 } 4889 4890 /* Print all cpuid feature names in featureset 4891 */ 4892 static void listflags(GList *features) 4893 { 4894 size_t len = 0; 4895 GList *tmp; 4896 4897 for (tmp = features; tmp; tmp = tmp->next) { 4898 const char *name = tmp->data; 4899 if ((len + strlen(name) + 1) >= 75) { 4900 qemu_printf("\n"); 4901 len = 0; 4902 } 4903 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4904 len += strlen(name) + 1; 4905 } 4906 qemu_printf("\n"); 4907 } 4908 4909 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4910 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4911 { 4912 ObjectClass *class_a = (ObjectClass *)a; 4913 ObjectClass *class_b = (ObjectClass *)b; 4914 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4915 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4916 int ret; 4917 4918 if (cc_a->ordering != cc_b->ordering) { 4919 ret = cc_a->ordering - cc_b->ordering; 4920 } else { 4921 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4922 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4923 ret = strcmp(name_a, name_b); 4924 } 4925 return ret; 4926 } 4927 4928 static GSList *get_sorted_cpu_model_list(void) 4929 { 4930 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4931 list = g_slist_sort(list, x86_cpu_list_compare); 4932 return list; 4933 } 4934 4935 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4936 { 4937 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4938 char *r = object_property_get_str(obj, "model-id", &error_abort); 4939 object_unref(obj); 4940 return r; 4941 } 4942 4943 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4944 { 4945 X86CPUVersion version; 4946 4947 if (!cc->model || !cc->model->is_alias) { 4948 return NULL; 4949 } 4950 version = x86_cpu_model_resolve_version(cc->model); 4951 if (version <= 0) { 4952 return NULL; 4953 } 4954 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4955 } 4956 4957 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4958 { 4959 ObjectClass *oc = data; 4960 X86CPUClass *cc = X86_CPU_CLASS(oc); 4961 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4962 g_autofree char *desc = g_strdup(cc->model_description); 4963 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4964 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4965 4966 if (!desc && alias_of) { 4967 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4968 desc = g_strdup("(alias configured by machine type)"); 4969 } else { 4970 desc = g_strdup_printf("(alias of %s)", alias_of); 4971 } 4972 } 4973 if (!desc && cc->model && cc->model->note) { 4974 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4975 } 4976 if (!desc) { 4977 desc = g_strdup_printf("%s", model_id); 4978 } 4979 4980 qemu_printf("x86 %-20s %-58s\n", name, desc); 4981 } 4982 4983 /* list available CPU models and flags */ 4984 void x86_cpu_list(void) 4985 { 4986 int i, j; 4987 GSList *list; 4988 GList *names = NULL; 4989 4990 qemu_printf("Available CPUs:\n"); 4991 list = get_sorted_cpu_model_list(); 4992 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4993 g_slist_free(list); 4994 4995 names = NULL; 4996 for (i = 0; i < 
ARRAY_SIZE(feature_word_info); i++) { 4997 FeatureWordInfo *fw = &feature_word_info[i]; 4998 for (j = 0; j < 64; j++) { 4999 if (fw->feat_names[j]) { 5000 names = g_list_append(names, (gpointer)fw->feat_names[j]); 5001 } 5002 } 5003 } 5004 5005 names = g_list_sort(names, (GCompareFunc)strcmp); 5006 5007 qemu_printf("\nRecognized CPUID flags:\n"); 5008 listflags(names); 5009 qemu_printf("\n"); 5010 g_list_free(names); 5011 } 5012 5013 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 5014 { 5015 ObjectClass *oc = data; 5016 X86CPUClass *cc = X86_CPU_CLASS(oc); 5017 CpuDefinitionInfoList **cpu_list = user_data; 5018 CpuDefinitionInfo *info; 5019 5020 info = g_malloc0(sizeof(*info)); 5021 info->name = x86_cpu_class_get_model_name(cc); 5022 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5023 info->has_unavailable_features = true; 5024 info->q_typename = g_strdup(object_class_get_name(oc)); 5025 info->migration_safe = cc->migration_safe; 5026 info->has_migration_safe = true; 5027 info->q_static = cc->static_model; 5028 if (cc->model && cc->model->cpudef->deprecation_note) { 5029 info->deprecated = true; 5030 } else { 5031 info->deprecated = false; 5032 } 5033 /* 5034 * Old machine types won't report aliases, so that alias translation 5035 * doesn't break compatibility with previous QEMU versions. 5036 */ 5037 if (default_cpu_version != CPU_VERSION_LEGACY) { 5038 info->alias_of = x86_cpu_class_get_alias_of(cc); 5039 info->has_alias_of = !!info->alias_of; 5040 } 5041 5042 QAPI_LIST_PREPEND(*cpu_list, info); 5043 } 5044 5045 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5046 { 5047 CpuDefinitionInfoList *cpu_list = NULL; 5048 GSList *list = get_sorted_cpu_model_list(); 5049 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5050 g_slist_free(list); 5051 return cpu_list; 5052 } 5053 5054 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5055 bool migratable_only) 5056 { 5057 FeatureWordInfo *wi = &feature_word_info[w]; 5058 uint64_t r = 0; 5059 5060 if (kvm_enabled()) { 5061 switch (wi->type) { 5062 case CPUID_FEATURE_WORD: 5063 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5064 wi->cpuid.ecx, 5065 wi->cpuid.reg); 5066 break; 5067 case MSR_FEATURE_WORD: 5068 r = kvm_arch_get_supported_msr_feature(kvm_state, 5069 wi->msr.index); 5070 break; 5071 } 5072 } else if (hvf_enabled()) { 5073 if (wi->type != CPUID_FEATURE_WORD) { 5074 return 0; 5075 } 5076 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5077 wi->cpuid.ecx, 5078 wi->cpuid.reg); 5079 } else if (tcg_enabled()) { 5080 r = wi->tcg_features; 5081 } else { 5082 return ~0; 5083 } 5084 if (migratable_only) { 5085 r &= x86_cpu_get_migratable_flags(w); 5086 } 5087 return r; 5088 } 5089 5090 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5091 { 5092 PropValue *pv; 5093 for (pv = props; pv->prop; pv++) { 5094 if (!pv->value) { 5095 continue; 5096 } 5097 object_property_parse(OBJECT(cpu), pv->prop, pv->value, 5098 &error_abort); 5099 } 5100 } 5101 5102 /* Apply properties for the CPU model version specified in model */ 5103 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5104 { 5105 const X86CPUVersionDefinition *vdef; 5106 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5107 5108 if (version == CPU_VERSION_LEGACY) { 5109 return; 5110 } 5111 5112 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5113 PropValue *p; 5114 5115 for (p = vdef->props; p && p->prop; p++) { 5116 
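/*
 * Versioned property lists are cumulative: the deltas of every version
 * up to and including the requested one are applied before we stop at
 * the matching version below.
 */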
object_property_parse(OBJECT(cpu), p->prop, p->value, 5117 &error_abort); 5118 } 5119 5120 if (vdef->version == version) { 5121 break; 5122 } 5123 } 5124 5125 /* 5126 * If we reached the end of the list, version number was invalid 5127 */ 5128 assert(vdef->version == version); 5129 } 5130 5131 /* Load data from X86CPUDefinition into a X86CPU object 5132 */ 5133 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5134 { 5135 X86CPUDefinition *def = model->cpudef; 5136 CPUX86State *env = &cpu->env; 5137 const char *vendor; 5138 char host_vendor[CPUID_VENDOR_SZ + 1]; 5139 FeatureWord w; 5140 5141 /*NOTE: any property set by this function should be returned by 5142 * x86_cpu_static_props(), so static expansion of 5143 * query-cpu-model-expansion is always complete. 5144 */ 5145 5146 /* CPU models only set _minimum_ values for level/xlevel: */ 5147 object_property_set_uint(OBJECT(cpu), "min-level", def->level, 5148 &error_abort); 5149 object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel, 5150 &error_abort); 5151 5152 object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort); 5153 object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort); 5154 object_property_set_int(OBJECT(cpu), "stepping", def->stepping, 5155 &error_abort); 5156 object_property_set_str(OBJECT(cpu), "model-id", def->model_id, 5157 &error_abort); 5158 for (w = 0; w < FEATURE_WORDS; w++) { 5159 env->features[w] = def->features[w]; 5160 } 5161 5162 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5163 cpu->legacy_cache = !def->cache_info; 5164 5165 /* Special cases not set in the X86CPUDefinition structs: */ 5166 /* TODO: in-kernel irqchip for hvf */ 5167 if (kvm_enabled()) { 5168 if (!kvm_irqchip_in_kernel()) { 5169 x86_cpu_change_kvm_default("x2apic", "off"); 5170 } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { 5171 x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); 5172 } 5173 5174 x86_cpu_apply_props(cpu, kvm_default_props); 5175 } else if (tcg_enabled()) { 5176 x86_cpu_apply_props(cpu, tcg_default_props); 5177 } 5178 5179 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5180 5181 /* sysenter isn't supported in compatibility mode on AMD, 5182 * syscall isn't supported in compatibility mode on Intel. 5183 * Normally we advertise the actual CPU vendor, but you can 5184 * override this using the 'vendor' property if you want to use 5185 * KVM's sysenter/syscall emulation in compatibility mode and 5186 * when doing cross vendor migration 5187 */ 5188 vendor = def->vendor; 5189 if (accel_uses_host_cpuid()) { 5190 uint32_t ebx = 0, ecx = 0, edx = 0; 5191 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5192 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5193 vendor = host_vendor; 5194 } 5195 5196 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 5197 5198 x86_cpu_apply_version_props(cpu, model); 5199 5200 /* 5201 * Properties in versioned CPU model are not user specified features. 5202 * We can simply clear env->user_features here since it will be filled later 5203 * in x86_cpu_expand_features() based on plus_features and minus_features. 5204 */ 5205 memset(&env->user_features, 0, sizeof(env->user_features)); 5206 } 5207 5208 #ifndef CONFIG_USER_ONLY 5209 /* Return a QDict containing keys for all properties that can be included 5210 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5211 * must be included in the dictionary. 
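 * As an illustration (not an exhaustive list), the resulting dictionary
 * contains "family", "model", "stepping", "model-id" and "vendor", plus
 * one key per named feature flag.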
5212 */ 5213 static QDict *x86_cpu_static_props(void) 5214 { 5215 FeatureWord w; 5216 int i; 5217 static const char *props[] = { 5218 "min-level", 5219 "min-xlevel", 5220 "family", 5221 "model", 5222 "stepping", 5223 "model-id", 5224 "vendor", 5225 "lmce", 5226 NULL, 5227 }; 5228 static QDict *d; 5229 5230 if (d) { 5231 return d; 5232 } 5233 5234 d = qdict_new(); 5235 for (i = 0; props[i]; i++) { 5236 qdict_put_null(d, props[i]); 5237 } 5238 5239 for (w = 0; w < FEATURE_WORDS; w++) { 5240 FeatureWordInfo *fi = &feature_word_info[w]; 5241 int bit; 5242 for (bit = 0; bit < 64; bit++) { 5243 if (!fi->feat_names[bit]) { 5244 continue; 5245 } 5246 qdict_put_null(d, fi->feat_names[bit]); 5247 } 5248 } 5249 5250 return d; 5251 } 5252 5253 /* Add an entry to @props dict, with the value for property. */ 5254 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5255 { 5256 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5257 &error_abort); 5258 5259 qdict_put_obj(props, prop, value); 5260 } 5261 5262 /* Convert CPU model data from X86CPU object to a property dictionary 5263 * that can recreate exactly the same CPU model. 5264 */ 5265 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5266 { 5267 QDict *sprops = x86_cpu_static_props(); 5268 const QDictEntry *e; 5269 5270 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5271 const char *prop = qdict_entry_key(e); 5272 x86_cpu_expand_prop(cpu, props, prop); 5273 } 5274 } 5275 5276 /* Convert CPU model data from X86CPU object to a property dictionary 5277 * that can recreate exactly the same CPU model, including every 5278 * writeable QOM property. 5279 */ 5280 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5281 { 5282 ObjectPropertyIterator iter; 5283 ObjectProperty *prop; 5284 5285 object_property_iter_init(&iter, OBJECT(cpu)); 5286 while ((prop = object_property_iter_next(&iter))) { 5287 /* skip read-only or write-only properties */ 5288 if (!prop->get || !prop->set) { 5289 continue; 5290 } 5291 5292 /* "hotplugged" is the only property that is configurable 5293 * on the command-line but will be set differently on CPUs 5294 * created using "-cpu ... -smp ..." and by CPUs created 5295 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5296 */ 5297 if (!strcmp(prop->name, "hotplugged")) { 5298 continue; 5299 } 5300 x86_cpu_expand_prop(cpu, props, prop->name); 5301 } 5302 } 5303 5304 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5305 { 5306 const QDictEntry *prop; 5307 5308 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5309 if (!object_property_set_qobject(obj, qdict_entry_key(prop), 5310 qdict_entry_value(prop), errp)) { 5311 break; 5312 } 5313 } 5314 } 5315 5316 /* Create X86CPU object according to model+props specification */ 5317 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5318 { 5319 X86CPU *xc = NULL; 5320 X86CPUClass *xcc; 5321 Error *err = NULL; 5322 5323 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5324 if (xcc == NULL) { 5325 error_setg(&err, "CPU model '%s' not found", model); 5326 goto out; 5327 } 5328 5329 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5330 if (props) { 5331 object_apply_props(OBJECT(xc), props, &err); 5332 if (err) { 5333 goto out; 5334 } 5335 } 5336 5337 x86_cpu_expand_features(xc, &err); 5338 if (err) { 5339 goto out; 5340 } 5341 5342 out: 5343 if (err) { 5344 error_propagate(errp, err); 5345 object_unref(OBJECT(xc)); 5346 xc = NULL; 5347 } 5348 return xc; 5349 } 5350 5351 CpuModelExpansionInfo * 5352 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5353 CpuModelInfo *model, 5354 Error **errp) 5355 { 5356 X86CPU *xc = NULL; 5357 Error *err = NULL; 5358 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5359 QDict *props = NULL; 5360 const char *base_name; 5361 5362 xc = x86_cpu_from_model(model->name, 5363 model->has_props ? 5364 qobject_to(QDict, model->props) : 5365 NULL, &err); 5366 if (err) { 5367 goto out; 5368 } 5369 5370 props = qdict_new(); 5371 ret->model = g_new0(CpuModelInfo, 1); 5372 ret->model->props = QOBJECT(props); 5373 ret->model->has_props = true; 5374 5375 switch (type) { 5376 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5377 /* Static expansion will be based on "base" only */ 5378 base_name = "base"; 5379 x86_cpu_to_dict(xc, props); 5380 break; 5381 case CPU_MODEL_EXPANSION_TYPE_FULL: 5382 /* As we don't return every single property, full expansion needs 5383 * to keep the original model name+props, and add extra 5384 * properties on top of that. 
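 * An illustrative QMP invocation (the model name below is only an example):
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "full",
 *                    "model": { "name": "Skylake-Client" } } }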
5385 */ 5386 base_name = model->name; 5387 x86_cpu_to_dict_full(xc, props); 5388 break; 5389 default: 5390 error_setg(&err, "Unsupported expansion type"); 5391 goto out; 5392 } 5393 5394 x86_cpu_to_dict(xc, props); 5395 5396 ret->model->name = g_strdup(base_name); 5397 5398 out: 5399 object_unref(OBJECT(xc)); 5400 if (err) { 5401 error_propagate(errp, err); 5402 qapi_free_CpuModelExpansionInfo(ret); 5403 ret = NULL; 5404 } 5405 return ret; 5406 } 5407 #endif /* !CONFIG_USER_ONLY */ 5408 5409 static gchar *x86_gdb_arch_name(CPUState *cs) 5410 { 5411 #ifdef TARGET_X86_64 5412 return g_strdup("i386:x86-64"); 5413 #else 5414 return g_strdup("i386"); 5415 #endif 5416 } 5417 5418 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5419 { 5420 X86CPUModel *model = data; 5421 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5422 CPUClass *cc = CPU_CLASS(oc); 5423 5424 xcc->model = model; 5425 xcc->migration_safe = true; 5426 cc->deprecation_note = model->cpudef->deprecation_note; 5427 } 5428 5429 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5430 { 5431 g_autofree char *typename = x86_cpu_type_name(name); 5432 TypeInfo ti = { 5433 .name = typename, 5434 .parent = TYPE_X86_CPU, 5435 .class_init = x86_cpu_cpudef_class_init, 5436 .class_data = model, 5437 }; 5438 5439 type_register(&ti); 5440 } 5441 5442 static void x86_register_cpudef_types(X86CPUDefinition *def) 5443 { 5444 X86CPUModel *m; 5445 const X86CPUVersionDefinition *vdef; 5446 5447 /* AMD aliases are handled at runtime based on CPUID vendor, so 5448 * they shouldn't be set on the CPU model table. 5449 */ 5450 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5451 /* catch mistakes instead of silently truncating model_id when too long */ 5452 assert(def->model_id && strlen(def->model_id) <= 48); 5453 5454 /* Unversioned model: */ 5455 m = g_new0(X86CPUModel, 1); 5456 m->cpudef = def; 5457 m->version = CPU_VERSION_AUTO; 5458 m->is_alias = true; 5459 x86_register_cpu_model_type(def->name, m); 5460 5461 /* Versioned models: */ 5462 5463 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5464 X86CPUModel *m = g_new0(X86CPUModel, 1); 5465 g_autofree char *name = 5466 x86_cpu_versioned_model_name(def, vdef->version); 5467 m->cpudef = def; 5468 m->version = vdef->version; 5469 m->note = vdef->note; 5470 x86_register_cpu_model_type(name, m); 5471 5472 if (vdef->alias) { 5473 X86CPUModel *am = g_new0(X86CPUModel, 1); 5474 am->cpudef = def; 5475 am->version = vdef->version; 5476 am->is_alias = true; 5477 x86_register_cpu_model_type(vdef->alias, am); 5478 } 5479 } 5480 5481 } 5482 5483 #if !defined(CONFIG_USER_ONLY) 5484 5485 void cpu_clear_apic_feature(CPUX86State *env) 5486 { 5487 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5488 } 5489 5490 #endif /* !CONFIG_USER_ONLY */ 5491 5492 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5493 uint32_t *eax, uint32_t *ebx, 5494 uint32_t *ecx, uint32_t *edx) 5495 { 5496 X86CPU *cpu = env_archcpu(env); 5497 CPUState *cs = env_cpu(env); 5498 uint32_t die_offset; 5499 uint32_t limit; 5500 uint32_t signature[3]; 5501 X86CPUTopoInfo topo_info; 5502 5503 topo_info.dies_per_pkg = env->nr_dies; 5504 topo_info.cores_per_die = cs->nr_cores; 5505 topo_info.threads_per_core = cs->nr_threads; 5506 5507 /* Calculate & apply limits for different index ranges */ 5508 if (index >= 0xC0000000) { 5509 limit = env->cpuid_xlevel2; 5510 } else if (index >= 0x80000000) { 5511 limit = env->cpuid_xlevel; 5512 } else if (index >= 0x40000000) { 
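/*
 * 0x40000000 and up is the conventional hypervisor leaf range; only the
 * signature leaves 0x40000000/0x40000001 are handled further below.
 */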
5513 limit = 0x40000001; 5514 } else { 5515 limit = env->cpuid_level; 5516 } 5517 5518 if (index > limit) { 5519 /* Intel documentation states that invalid EAX input will 5520 * return the same information as EAX=cpuid_level 5521 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5522 */ 5523 index = env->cpuid_level; 5524 } 5525 5526 switch(index) { 5527 case 0: 5528 *eax = env->cpuid_level; 5529 *ebx = env->cpuid_vendor1; 5530 *edx = env->cpuid_vendor2; 5531 *ecx = env->cpuid_vendor3; 5532 break; 5533 case 1: 5534 *eax = env->cpuid_version; 5535 *ebx = (cpu->apic_id << 24) | 5536 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5537 *ecx = env->features[FEAT_1_ECX]; 5538 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5539 *ecx |= CPUID_EXT_OSXSAVE; 5540 } 5541 *edx = env->features[FEAT_1_EDX]; 5542 if (cs->nr_cores * cs->nr_threads > 1) { 5543 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5544 *edx |= CPUID_HT; 5545 } 5546 if (!cpu->enable_pmu) { 5547 *ecx &= ~CPUID_EXT_PDCM; 5548 } 5549 break; 5550 case 2: 5551 /* cache info: needed for Pentium Pro compatibility */ 5552 if (cpu->cache_info_passthrough) { 5553 host_cpuid(index, 0, eax, ebx, ecx, edx); 5554 break; 5555 } 5556 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5557 *ebx = 0; 5558 if (!cpu->enable_l3_cache) { 5559 *ecx = 0; 5560 } else { 5561 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5562 } 5563 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5564 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5565 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5566 break; 5567 case 4: 5568 /* cache info: needed for Core compatibility */ 5569 if (cpu->cache_info_passthrough) { 5570 host_cpuid(index, count, eax, ebx, ecx, edx); 5571 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
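 * CPUID.4:EAX[31:26] encodes the number of addressable core IDs per
 * package minus one; it is recomputed from nr_cores just below.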
*/ 5572 *eax &= ~0xFC000000; 5573 if ((*eax & 31) && cs->nr_cores > 1) { 5574 *eax |= (cs->nr_cores - 1) << 26; 5575 } 5576 } else { 5577 *eax = 0; 5578 switch (count) { 5579 case 0: /* L1 dcache info */ 5580 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5581 1, cs->nr_cores, 5582 eax, ebx, ecx, edx); 5583 break; 5584 case 1: /* L1 icache info */ 5585 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5586 1, cs->nr_cores, 5587 eax, ebx, ecx, edx); 5588 break; 5589 case 2: /* L2 cache info */ 5590 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5591 cs->nr_threads, cs->nr_cores, 5592 eax, ebx, ecx, edx); 5593 break; 5594 case 3: /* L3 cache info */ 5595 die_offset = apicid_die_offset(&topo_info); 5596 if (cpu->enable_l3_cache) { 5597 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5598 (1 << die_offset), cs->nr_cores, 5599 eax, ebx, ecx, edx); 5600 break; 5601 } 5602 /* fall through */ 5603 default: /* end of info */ 5604 *eax = *ebx = *ecx = *edx = 0; 5605 break; 5606 } 5607 } 5608 break; 5609 case 5: 5610 /* MONITOR/MWAIT Leaf */ 5611 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5612 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5613 *ecx = cpu->mwait.ecx; /* flags */ 5614 *edx = cpu->mwait.edx; /* mwait substates */ 5615 break; 5616 case 6: 5617 /* Thermal and Power Leaf */ 5618 *eax = env->features[FEAT_6_EAX]; 5619 *ebx = 0; 5620 *ecx = 0; 5621 *edx = 0; 5622 break; 5623 case 7: 5624 /* Structured Extended Feature Flags Enumeration Leaf */ 5625 if (count == 0) { 5626 /* Maximum ECX value for sub-leaves */ 5627 *eax = env->cpuid_level_func7; 5628 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5629 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5630 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5631 *ecx |= CPUID_7_0_ECX_OSPKE; 5632 } 5633 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5634 } else if (count == 1) { 5635 *eax = env->features[FEAT_7_1_EAX]; 5636 *ebx = 0; 5637 *ecx = 0; 5638 *edx = 0; 5639 } else { 5640 *eax = 0; 5641 *ebx = 0; 5642 *ecx = 0; 5643 *edx = 0; 5644 } 5645 break; 5646 case 9: 5647 /* Direct Cache Access Information Leaf */ 5648 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5649 *ebx = 0; 5650 *ecx = 0; 5651 *edx = 0; 5652 break; 5653 case 0xA: 5654 /* Architectural Performance Monitoring Leaf */ 5655 if (kvm_enabled() && cpu->enable_pmu) { 5656 KVMState *s = cs->kvm_state; 5657 5658 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5659 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5660 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5661 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5662 } else if (hvf_enabled() && cpu->enable_pmu) { 5663 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5664 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5665 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5666 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5667 } else { 5668 *eax = 0; 5669 *ebx = 0; 5670 *ecx = 0; 5671 *edx = 0; 5672 } 5673 break; 5674 case 0xB: 5675 /* Extended Topology Enumeration Leaf */ 5676 if (!cpu->enable_cpuid_0xb) { 5677 *eax = *ebx = *ecx = *edx = 0; 5678 break; 5679 } 5680 5681 *ecx = count & 0xff; 5682 *edx = cpu->apic_id; 5683 5684 switch (count) { 5685 case 0: 5686 *eax = apicid_core_offset(&topo_info); 5687 *ebx = cs->nr_threads; 5688 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5689 break; 5690 case 1: 5691 *eax = apicid_pkg_offset(&topo_info); 5692 *ebx = cs->nr_cores * cs->nr_threads; 5693 *ecx 
|= CPUID_TOPOLOGY_LEVEL_CORE; 5694 break; 5695 default: 5696 *eax = 0; 5697 *ebx = 0; 5698 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5699 } 5700 5701 assert(!(*eax & ~0x1f)); 5702 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5703 break; 5704 case 0x1F: 5705 /* V2 Extended Topology Enumeration Leaf */ 5706 if (env->nr_dies < 2) { 5707 *eax = *ebx = *ecx = *edx = 0; 5708 break; 5709 } 5710 5711 *ecx = count & 0xff; 5712 *edx = cpu->apic_id; 5713 switch (count) { 5714 case 0: 5715 *eax = apicid_core_offset(&topo_info); 5716 *ebx = cs->nr_threads; 5717 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5718 break; 5719 case 1: 5720 *eax = apicid_die_offset(&topo_info); 5721 *ebx = cs->nr_cores * cs->nr_threads; 5722 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5723 break; 5724 case 2: 5725 *eax = apicid_pkg_offset(&topo_info); 5726 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5727 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5728 break; 5729 default: 5730 *eax = 0; 5731 *ebx = 0; 5732 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5733 } 5734 assert(!(*eax & ~0x1f)); 5735 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5736 break; 5737 case 0xD: { 5738 /* Processor Extended State */ 5739 *eax = 0; 5740 *ebx = 0; 5741 *ecx = 0; 5742 *edx = 0; 5743 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5744 break; 5745 } 5746 5747 if (count == 0) { 5748 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5749 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5750 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5751 /* 5752 * The initial value of xcr0 and ebx == 0. On a host without KVM 5753 * commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even after the 5754 * guest updates xcr0, which crashes some legacy guests 5755 * (e.g., CentOS 6). Set ebx == ecx to work around it. 5756 */ 5757 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5758 } else if (count == 1) { 5759 *eax = env->features[FEAT_XSAVE]; 5760 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5761 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5762 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5763 *eax = esa->size; 5764 *ebx = esa->offset; 5765 } 5766 } 5767 break; 5768 } 5769 case 0x14: { 5770 /* Intel Processor Trace Enumeration */ 5771 *eax = 0; 5772 *ebx = 0; 5773 *ecx = 0; 5774 *edx = 0; 5775 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5776 !kvm_enabled()) { 5777 break; 5778 } 5779 5780 if (count == 0) { 5781 *eax = INTEL_PT_MAX_SUBLEAF; 5782 *ebx = INTEL_PT_MINIMAL_EBX; 5783 *ecx = INTEL_PT_MINIMAL_ECX; 5784 if (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP) { 5785 *ecx |= CPUID_14_0_ECX_LIP; 5786 } 5787 } else if (count == 1) { 5788 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5789 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5790 } 5791 break; 5792 } 5793 case 0x40000000: 5794 /* 5795 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5796 * set here, but we restrict this to TCG nonetheless.
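 * (When KVM is in charge it publishes its own "KVMKVMKVM" signature at
 * this same leaf.)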
5797 */ 5798 if (tcg_enabled() && cpu->expose_tcg) { 5799 memcpy(signature, "TCGTCGTCGTCG", 12); 5800 *eax = 0x40000001; 5801 *ebx = signature[0]; 5802 *ecx = signature[1]; 5803 *edx = signature[2]; 5804 } else { 5805 *eax = 0; 5806 *ebx = 0; 5807 *ecx = 0; 5808 *edx = 0; 5809 } 5810 break; 5811 case 0x40000001: 5812 *eax = 0; 5813 *ebx = 0; 5814 *ecx = 0; 5815 *edx = 0; 5816 break; 5817 case 0x80000000: 5818 *eax = env->cpuid_xlevel; 5819 *ebx = env->cpuid_vendor1; 5820 *edx = env->cpuid_vendor2; 5821 *ecx = env->cpuid_vendor3; 5822 break; 5823 case 0x80000001: 5824 *eax = env->cpuid_version; 5825 *ebx = 0; 5826 *ecx = env->features[FEAT_8000_0001_ECX]; 5827 *edx = env->features[FEAT_8000_0001_EDX]; 5828 5829 /* The Linux kernel checks for the CMPLegacy bit and 5830 * discards multiple thread information if it is set. 5831 * So don't set it here for Intel to make Linux guests happy. 5832 */ 5833 if (cs->nr_cores * cs->nr_threads > 1) { 5834 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5835 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5836 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5837 *ecx |= 1 << 1; /* CmpLegacy bit */ 5838 } 5839 } 5840 break; 5841 case 0x80000002: 5842 case 0x80000003: 5843 case 0x80000004: 5844 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5845 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5846 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5847 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5848 break; 5849 case 0x80000005: 5850 /* cache info (L1 cache) */ 5851 if (cpu->cache_info_passthrough) { 5852 host_cpuid(index, 0, eax, ebx, ecx, edx); 5853 break; 5854 } 5855 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5856 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5857 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5858 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5859 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5860 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5861 break; 5862 case 0x80000006: 5863 /* cache info (L2 cache) */ 5864 if (cpu->cache_info_passthrough) { 5865 host_cpuid(index, 0, eax, ebx, ecx, edx); 5866 break; 5867 } 5868 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5869 (L2_DTLB_2M_ENTRIES << 16) | 5870 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5871 (L2_ITLB_2M_ENTRIES); 5872 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5873 (L2_DTLB_4K_ENTRIES << 16) | 5874 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5875 (L2_ITLB_4K_ENTRIES); 5876 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5877 cpu->enable_l3_cache ? 5878 env->cache_info_amd.l3_cache : NULL, 5879 ecx, edx); 5880 break; 5881 case 0x80000007: 5882 *eax = 0; 5883 *ebx = 0; 5884 *ecx = 0; 5885 *edx = env->features[FEAT_8000_0007_EDX]; 5886 break; 5887 case 0x80000008: 5888 /* virtual & phys address size in low 2 bytes. */ 5889 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5890 /* 64 bit processor */ 5891 *eax = cpu->phys_bits; /* configurable physical bits */ 5892 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5893 *eax |= 0x00003900; /* 57 bits virtual */ 5894 } else { 5895 *eax |= 0x00003000; /* 48 bits virtual */ 5896 } 5897 } else { 5898 *eax = cpu->phys_bits; 5899 } 5900 *ebx = env->features[FEAT_8000_0008_EBX]; 5901 if (cs->nr_cores * cs->nr_threads > 1) { 5902 /* 5903 * Bits 15:12 is "The number of bits in the initial 5904 * Core::X86::Apic::ApicId[ApicId] value that indicate 5905 * thread ID within a package". 
5906 * Bits 7:0 is "The number of threads in the package is NC+1" 5907 */ 5908 *ecx = (apicid_pkg_offset(&topo_info) << 12) | 5909 ((cs->nr_cores * cs->nr_threads) - 1); 5910 } else { 5911 *ecx = 0; 5912 } 5913 *edx = 0; 5914 break; 5915 case 0x8000000A: 5916 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5917 *eax = 0x00000001; /* SVM Revision */ 5918 *ebx = 0x00000010; /* nr of ASIDs */ 5919 *ecx = 0; 5920 *edx = env->features[FEAT_SVM]; /* optional features */ 5921 } else { 5922 *eax = 0; 5923 *ebx = 0; 5924 *ecx = 0; 5925 *edx = 0; 5926 } 5927 break; 5928 case 0x8000001D: 5929 *eax = 0; 5930 if (cpu->cache_info_passthrough) { 5931 host_cpuid(index, count, eax, ebx, ecx, edx); 5932 break; 5933 } 5934 switch (count) { 5935 case 0: /* L1 dcache info */ 5936 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5937 &topo_info, eax, ebx, ecx, edx); 5938 break; 5939 case 1: /* L1 icache info */ 5940 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5941 &topo_info, eax, ebx, ecx, edx); 5942 break; 5943 case 2: /* L2 cache info */ 5944 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5945 &topo_info, eax, ebx, ecx, edx); 5946 break; 5947 case 3: /* L3 cache info */ 5948 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5949 &topo_info, eax, ebx, ecx, edx); 5950 break; 5951 default: /* end of info */ 5952 *eax = *ebx = *ecx = *edx = 0; 5953 break; 5954 } 5955 break; 5956 case 0x8000001E: 5957 if (cpu->core_id <= 255) { 5958 encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx); 5959 } else { 5960 *eax = 0; 5961 *ebx = 0; 5962 *ecx = 0; 5963 *edx = 0; 5964 } 5965 break; 5966 case 0xC0000000: 5967 *eax = env->cpuid_xlevel2; 5968 *ebx = 0; 5969 *ecx = 0; 5970 *edx = 0; 5971 break; 5972 case 0xC0000001: 5973 /* Support for VIA CPU's CPUID instruction */ 5974 *eax = env->cpuid_version; 5975 *ebx = 0; 5976 *ecx = 0; 5977 *edx = env->features[FEAT_C000_0001_EDX]; 5978 break; 5979 case 0xC0000002: 5980 case 0xC0000003: 5981 case 0xC0000004: 5982 /* Reserved for the future, and now filled with zero */ 5983 *eax = 0; 5984 *ebx = 0; 5985 *ecx = 0; 5986 *edx = 0; 5987 break; 5988 case 0x8000001F: 5989 *eax = sev_enabled() ? 
0x2 : 0; 5990 *ebx = sev_get_cbit_position(); 5991 *ebx |= sev_get_reduced_phys_bits() << 6; 5992 *ecx = 0; 5993 *edx = 0; 5994 break; 5995 default: 5996 /* reserved values: zero */ 5997 *eax = 0; 5998 *ebx = 0; 5999 *ecx = 0; 6000 *edx = 0; 6001 break; 6002 } 6003 } 6004 6005 static void x86_cpu_reset(DeviceState *dev) 6006 { 6007 CPUState *s = CPU(dev); 6008 X86CPU *cpu = X86_CPU(s); 6009 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 6010 CPUX86State *env = &cpu->env; 6011 target_ulong cr4; 6012 uint64_t xcr0; 6013 int i; 6014 6015 xcc->parent_reset(dev); 6016 6017 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 6018 6019 env->old_exception = -1; 6020 6021 /* init to reset state */ 6022 6023 env->hflags2 |= HF2_GIF_MASK; 6024 env->hflags &= ~HF_GUEST_MASK; 6025 6026 cpu_x86_update_cr0(env, 0x60000010); 6027 env->a20_mask = ~0x0; 6028 env->smbase = 0x30000; 6029 env->msr_smi_count = 0; 6030 6031 env->idt.limit = 0xffff; 6032 env->gdt.limit = 0xffff; 6033 env->ldt.limit = 0xffff; 6034 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6035 env->tr.limit = 0xffff; 6036 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6037 6038 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6039 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6040 DESC_R_MASK | DESC_A_MASK); 6041 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6042 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6043 DESC_A_MASK); 6044 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6045 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6046 DESC_A_MASK); 6047 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6048 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6049 DESC_A_MASK); 6050 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6051 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6052 DESC_A_MASK); 6053 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6054 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6055 DESC_A_MASK); 6056 6057 env->eip = 0xfff0; 6058 env->regs[R_EDX] = env->cpuid_version; 6059 6060 env->eflags = 0x2; 6061 6062 /* FPU init */ 6063 for (i = 0; i < 8; i++) { 6064 env->fptags[i] = 1; 6065 } 6066 cpu_set_fpuc(env, 0x37f); 6067 6068 env->mxcsr = 0x1f80; 6069 /* All units are in INIT state. */ 6070 env->xstate_bv = 0; 6071 6072 env->pat = 0x0007040600070406ULL; 6073 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6074 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6075 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6076 } 6077 6078 memset(env->dr, 0, sizeof(env->dr)); 6079 env->dr[6] = DR6_FIXED_1; 6080 env->dr[7] = DR7_FIXED_1; 6081 cpu_breakpoint_remove_all(s, BP_CPU); 6082 cpu_watchpoint_remove_all(s, BP_CPU); 6083 6084 cr4 = 0; 6085 xcr0 = XSTATE_FP_MASK; 6086 6087 #ifdef CONFIG_USER_ONLY 6088 /* Enable all the features for user-mode. */ 6089 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6090 xcr0 |= XSTATE_SSE_MASK; 6091 } 6092 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6093 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6094 if (env->features[esa->feature] & esa->bits) { 6095 xcr0 |= 1ull << i; 6096 } 6097 } 6098 6099 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6100 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6101 } 6102 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6103 cr4 |= CR4_FSGSBASE_MASK; 6104 } 6105 #endif 6106 6107 env->xcr0 = xcr0; 6108 cpu_x86_update_cr4(env, cr4); 6109 6110 /* 6111 * SDM 11.11.5 requires: 6112 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6113 * - IA32_MTRR_PHYSMASKn.V = 0 6114 * All other bits are undefined. For simplification, zero it all. 
6115 */ 6116 env->mtrr_deftype = 0; 6117 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6118 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6119 6120 env->interrupt_injected = -1; 6121 env->exception_nr = -1; 6122 env->exception_pending = 0; 6123 env->exception_injected = 0; 6124 env->exception_has_payload = false; 6125 env->exception_payload = 0; 6126 env->nmi_injected = false; 6127 #if !defined(CONFIG_USER_ONLY) 6128 /* We hard-wire the BSP to the first CPU. */ 6129 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6130 6131 s->halted = !cpu_is_bsp(cpu); 6132 6133 if (kvm_enabled()) { 6134 kvm_arch_reset_vcpu(cpu); 6135 } 6136 #endif 6137 } 6138 6139 #ifndef CONFIG_USER_ONLY 6140 bool cpu_is_bsp(X86CPU *cpu) 6141 { 6142 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6143 } 6144 6145 /* TODO: remove me, when reset over QOM tree is implemented */ 6146 static void x86_cpu_machine_reset_cb(void *opaque) 6147 { 6148 X86CPU *cpu = opaque; 6149 cpu_reset(CPU(cpu)); 6150 } 6151 #endif 6152 6153 static void mce_init(X86CPU *cpu) 6154 { 6155 CPUX86State *cenv = &cpu->env; 6156 unsigned int bank; 6157 6158 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6159 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6160 (CPUID_MCE | CPUID_MCA)) { 6161 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6162 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6163 cenv->mcg_ctl = ~(uint64_t)0; 6164 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6165 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6166 } 6167 } 6168 } 6169 6170 #ifndef CONFIG_USER_ONLY 6171 APICCommonClass *apic_get_class(void) 6172 { 6173 const char *apic_type = "apic"; 6174 6175 /* TODO: in-kernel irqchip for hvf */ 6176 if (kvm_apic_in_kernel()) { 6177 apic_type = "kvm-apic"; 6178 } else if (xen_enabled()) { 6179 apic_type = "xen-apic"; 6180 } else if (whpx_apic_in_platform()) { 6181 apic_type = "whpx-apic"; 6182 } 6183 6184 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6185 } 6186 6187 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6188 { 6189 APICCommonState *apic; 6190 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6191 6192 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6193 6194 object_property_add_child(OBJECT(cpu), "lapic", 6195 OBJECT(cpu->apic_state)); 6196 object_unref(OBJECT(cpu->apic_state)); 6197 6198 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6199 /* TODO: convert to link<> */ 6200 apic = APIC_COMMON(cpu->apic_state); 6201 apic->cpu = cpu; 6202 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6203 } 6204 6205 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6206 { 6207 APICCommonState *apic; 6208 static bool apic_mmio_map_once; 6209 6210 if (cpu->apic_state == NULL) { 6211 return; 6212 } 6213 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6214 6215 /* Map APIC MMIO area */ 6216 apic = APIC_COMMON(cpu->apic_state); 6217 if (!apic_mmio_map_once) { 6218 memory_region_add_subregion_overlap(get_system_memory(), 6219 apic->apicbase & 6220 MSR_IA32_APICBASE_BASE, 6221 &apic->io_memory, 6222 0x1000); 6223 apic_mmio_map_once = true; 6224 } 6225 } 6226 6227 static void x86_cpu_machine_done(Notifier *n, void *unused) 6228 { 6229 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6230 MemoryRegion *smram = 6231 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6232 6233 if (smram) { 6234 cpu->smram = g_new(MemoryRegion, 1); 6235 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6236 smram, 0, 4 * GiB); 
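/*
 * The alias is enabled and mapped below at priority 1, so it sits above
 * the regular system-memory alias (priority 0) in the SMM address space.
 */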
6237 memory_region_set_enabled(cpu->smram, true); 6238 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6239 } 6240 } 6241 #else 6242 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6243 { 6244 } 6245 #endif 6246 6247 /* Note: Only safe for use on x86(-64) hosts */ 6248 static uint32_t x86_host_phys_bits(void) 6249 { 6250 uint32_t eax; 6251 uint32_t host_phys_bits; 6252 6253 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6254 if (eax >= 0x80000008) { 6255 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6256 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6257 * at 23:16 that can specify a maximum physical address bits for 6258 * the guest that can override this value; but I've not seen 6259 * anything with that set. 6260 */ 6261 host_phys_bits = eax & 0xff; 6262 } else { 6263 /* It's an odd 64 bit machine that doesn't have the leaf for 6264 * physical address bits; fall back to 36 that's most older 6265 * Intel. 6266 */ 6267 host_phys_bits = 36; 6268 } 6269 6270 return host_phys_bits; 6271 } 6272 6273 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6274 { 6275 if (*min < value) { 6276 *min = value; 6277 } 6278 } 6279 6280 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6281 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6282 { 6283 CPUX86State *env = &cpu->env; 6284 FeatureWordInfo *fi = &feature_word_info[w]; 6285 uint32_t eax = fi->cpuid.eax; 6286 uint32_t region = eax & 0xF0000000; 6287 6288 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6289 if (!env->features[w]) { 6290 return; 6291 } 6292 6293 switch (region) { 6294 case 0x00000000: 6295 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6296 break; 6297 case 0x80000000: 6298 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6299 break; 6300 case 0xC0000000: 6301 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6302 break; 6303 } 6304 6305 if (eax == 7) { 6306 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6307 fi->cpuid.ecx); 6308 } 6309 } 6310 6311 /* Calculate XSAVE components based on the configured CPU feature flags */ 6312 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6313 { 6314 CPUX86State *env = &cpu->env; 6315 int i; 6316 uint64_t mask; 6317 6318 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6319 env->features[FEAT_XSAVE_COMP_LO] = 0; 6320 env->features[FEAT_XSAVE_COMP_HI] = 0; 6321 return; 6322 } 6323 6324 mask = 0; 6325 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6326 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6327 if (env->features[esa->feature] & esa->bits) { 6328 mask |= (1ULL << i); 6329 } 6330 } 6331 6332 env->features[FEAT_XSAVE_COMP_LO] = mask; 6333 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6334 } 6335 6336 /***** Steps involved on loading and filtering CPUID data 6337 * 6338 * When initializing and realizing a CPU object, the steps 6339 * involved in setting up CPUID data are: 6340 * 6341 * 1) Loading CPU model definition (X86CPUDefinition). This is 6342 * implemented by x86_cpu_load_model() and should be completely 6343 * transparent, as it is done automatically by instance_init. 6344 * No code should need to look at X86CPUDefinition structs 6345 * outside instance_init. 6346 * 6347 * 2) CPU expansion. This is done by realize before CPUID 6348 * filtering, and will make sure host/accelerator data is 6349 * loaded for CPU models that depend on host capabilities 6350 * (e.g. "host"). 
Done by x86_cpu_expand_features(). 6351 * 6352 * 3) CPUID filtering. This initializes extra data related to 6353 * CPUID, and checks if the host supports all capabilities 6354 * required by the CPU. Runnability of a CPU model is 6355 * determined at this step. Done by x86_cpu_filter_features(). 6356 * 6357 * Some operations don't require all steps to be performed. 6358 * More precisely: 6359 * 6360 * - CPU instance creation (instance_init) will run only CPU 6361 * model loading. CPU expansion can't run at instance_init-time 6362 * because host/accelerator data may be not available yet. 6363 * - CPU realization will perform both CPU model expansion and CPUID 6364 * filtering, and return an error in case one of them fails. 6365 * - query-cpu-definitions needs to run all 3 steps. It needs 6366 * to run CPUID filtering, as the 'unavailable-features' 6367 * field is set based on the filtering results. 6368 * - The query-cpu-model-expansion QMP command only needs to run 6369 * CPU model loading and CPU expansion. It should not filter 6370 * any CPUID data based on host capabilities. 6371 */ 6372 6373 /* Expand CPU configuration data, based on configured features 6374 * and host/accelerator capabilities when appropriate. 6375 */ 6376 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6377 { 6378 CPUX86State *env = &cpu->env; 6379 FeatureWord w; 6380 int i; 6381 GList *l; 6382 6383 for (l = plus_features; l; l = l->next) { 6384 const char *prop = l->data; 6385 if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) { 6386 return; 6387 } 6388 } 6389 6390 for (l = minus_features; l; l = l->next) { 6391 const char *prop = l->data; 6392 if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) { 6393 return; 6394 } 6395 } 6396 6397 /*TODO: Now cpu->max_features doesn't overwrite features 6398 * set using QOM properties, and we can convert 6399 * plus_features & minus_features to global properties 6400 * inside x86_cpu_parse_featurestr() too. 6401 */ 6402 if (cpu->max_features) { 6403 for (w = 0; w < FEATURE_WORDS; w++) { 6404 /* Override only features that weren't set explicitly 6405 * by the user. 6406 */ 6407 env->features[w] |= 6408 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6409 ~env->user_features[w] & 6410 ~feature_word_info[w].no_autoenable_flags; 6411 } 6412 } 6413 6414 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6415 FeatureDep *d = &feature_dependencies[i]; 6416 if (!(env->features[d->from.index] & d->from.mask)) { 6417 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6418 6419 /* Not an error unless the dependent feature was added explicitly. 
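 * Only bits that also appear in env->user_features are reported; dependent
 * bits merely inherited from the CPU model are cleared silently.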
*/ 6420 mark_unavailable_features(cpu, d->to.index, 6421 unavailable_features & env->user_features[d->to.index], 6422 "This feature depends on other features that were not requested"); 6423 6424 env->features[d->to.index] &= ~unavailable_features; 6425 } 6426 } 6427 6428 if (!kvm_enabled() || !cpu->expose_kvm) { 6429 env->features[FEAT_KVM] = 0; 6430 } 6431 6432 x86_cpu_enable_xsave_components(cpu); 6433 6434 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */ 6435 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6436 if (cpu->full_cpuid_auto_level) { 6437 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6438 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6439 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6440 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6441 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6442 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6443 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6444 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6445 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6446 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6447 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6448 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6449 6450 /* Intel Processor Trace requires CPUID[0x14] */ 6451 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6452 if (cpu->intel_pt_auto_level) { 6453 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6454 } else if (cpu->env.cpuid_min_level < 0x14) { 6455 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6456 CPUID_7_0_EBX_INTEL_PT, 6457 "Intel PT needs CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,min-level=0x14\""); 6458 } 6459 } 6460 6461 /* CPU topology with multi-die support requires CPUID[0x1F] */ 6462 if (env->nr_dies > 1) { 6463 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6464 } 6465 6466 /* SVM requires CPUID[0x8000000A] */ 6467 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6468 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6469 } 6470 6471 /* SEV requires CPUID[0x8000001F] */ 6472 if (sev_enabled()) { 6473 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6474 } 6475 } 6476 6477 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6478 if (env->cpuid_level_func7 == UINT32_MAX) { 6479 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6480 } 6481 if (env->cpuid_level == UINT32_MAX) { 6482 env->cpuid_level = env->cpuid_min_level; 6483 } 6484 if (env->cpuid_xlevel == UINT32_MAX) { 6485 env->cpuid_xlevel = env->cpuid_min_xlevel; 6486 } 6487 if (env->cpuid_xlevel2 == UINT32_MAX) { 6488 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6489 } 6490 } 6491 6492 /* 6493 * Finishes initialization of CPUID data, filters CPU feature 6494 * words based on host availability of each feature. 6495 * 6496 * Flags that the host cannot provide are reported and filtered out via mark_unavailable_features(). 6497 */ 6498 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6499 { 6500 CPUX86State *env = &cpu->env; 6501 FeatureWord w; 6502 const char *prefix = NULL; 6503 6504 if (verbose) { 6505 prefix = accel_uses_host_cpuid() 6506 ?
"host doesn't support requested feature" 6507 : "TCG doesn't support requested feature"; 6508 } 6509 6510 for (w = 0; w < FEATURE_WORDS; w++) { 6511 uint64_t host_feat = 6512 x86_cpu_get_supported_feature_word(w, false); 6513 uint64_t requested_features = env->features[w]; 6514 uint64_t unavailable_features = requested_features & ~host_feat; 6515 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6516 } 6517 6518 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6519 kvm_enabled()) { 6520 KVMState *s = CPU(cpu)->kvm_state; 6521 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6522 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6523 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6524 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6525 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6526 6527 if (!eax_0 || 6528 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6529 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6530 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6531 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6532 INTEL_PT_ADDR_RANGES_NUM) || 6533 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6534 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6535 ((ecx_0 & CPUID_14_0_ECX_LIP) != 6536 (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP))) { 6537 /* 6538 * Processor Trace capabilities aren't configurable, so if the 6539 * host can't emulate the capabilities we report on 6540 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6541 */ 6542 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6543 } 6544 } 6545 } 6546 6547 static void x86_cpu_hyperv_realize(X86CPU *cpu) 6548 { 6549 size_t len; 6550 6551 /* Hyper-V vendor id */ 6552 if (!cpu->hyperv_vendor) { 6553 memcpy(cpu->hyperv_vendor_id, "Microsoft Hv", 12); 6554 } else { 6555 len = strlen(cpu->hyperv_vendor); 6556 6557 if (len > 12) { 6558 warn_report("hv-vendor-id truncated to 12 characters"); 6559 len = 12; 6560 } 6561 memset(cpu->hyperv_vendor_id, 0, 12); 6562 memcpy(cpu->hyperv_vendor_id, cpu->hyperv_vendor, len); 6563 } 6564 6565 /* 'Hv#1' interface identification*/ 6566 cpu->hyperv_interface_id[0] = 0x31237648; 6567 cpu->hyperv_interface_id[1] = 0; 6568 cpu->hyperv_interface_id[2] = 0; 6569 cpu->hyperv_interface_id[3] = 0; 6570 6571 /* Hypervisor system identity */ 6572 cpu->hyperv_version_id[0] = 0x00001bbc; 6573 cpu->hyperv_version_id[1] = 0x00060001; 6574 6575 /* Hypervisor implementation limits */ 6576 cpu->hyperv_limits[0] = 64; 6577 cpu->hyperv_limits[1] = 0; 6578 cpu->hyperv_limits[2] = 0; 6579 } 6580 6581 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6582 { 6583 CPUState *cs = CPU(dev); 6584 X86CPU *cpu = X86_CPU(dev); 6585 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6586 CPUX86State *env = &cpu->env; 6587 Error *local_err = NULL; 6588 static bool ht_warned; 6589 6590 if (xcc->host_cpuid_required) { 6591 if (!accel_uses_host_cpuid()) { 6592 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6593 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6594 goto out; 6595 } 6596 } 6597 6598 if (cpu->max_features && accel_uses_host_cpuid()) { 6599 if (enable_cpu_pm) { 6600 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6601 &cpu->mwait.ecx, &cpu->mwait.edx); 6602 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6603 if (kvm_enabled() && kvm_has_waitpkg()) { 6604 env->features[FEAT_7_0_ECX] |= 
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            goto out;
        }
    }

    if (cpu->max_features && accel_uses_host_cpuid()) {
        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
            if (kvm_enabled() && kvm_has_waitpkg()) {
                env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG;
            }
        }
        if (kvm_enabled() && cpu->ucode_rev == 0) {
            cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
                                                                MSR_IA32_UCODE_REV);
        }
    }

    if (cpu->ucode_rev == 0) {
        /* The default is the same as KVM's. */
        if (IS_AMD_CPU(env)) {
            cpu->ucode_rev = 0x01000065;
        } else {
            cpu->ucode_rev = 0x100000000ULL;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if the host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);

    if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
        error_setg(&local_err,
                   accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64-bit systems, think about the number of physical bits to present.
     * Ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
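    /*
     * Selection order below: host-phys-bits=on copies the host value
     * (optionally clamped by host-phys-bits-limit); an explicit phys-bits
     * must fall within 32..TARGET_PHYS_ADDR_SPACE_BITS (TCG only accepts
     * TCG_PHYS_ADDR_BITS); a value of 0 falls back to the TCG default of 40.
     */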
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " do not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u"
                                 " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32-bit systems, don't use the user-set value; keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    /* Process Hyper-V enlightenments */
    x86_cpu_hyperv_realize(cpu);

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

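    /*
     * Under TCG, system management mode needs its own address space: the
     * "cpu-smm" address space built below layers SMRAM (linked in later
     * from /machine/smram) above an alias of normal system memory.
     */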
#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though
     * QEMU adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the
     * (sockets, cores, threads) topology, it is still better to warn users
     * when threads > 1 is requested on an AMD model without TOPOEXT.
     *
     * NOTE: the following check has to come after qemu_init_vcpu(); otherwise
     * cs->nr_threads hasn't been populated yet and the check is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading (%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling the topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev);
}

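/*
 * Glue between a QOM boolean property and the feature word bit(s) it
 * controls; one BitProperty is shared by all bits registered under the
 * same property name.
 */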
typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

/* Register a boolean property to get/set a single bit in a feature word.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPUClass *xcc,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    ObjectClass *oc = OBJECT_CLASS(xcc);
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_class_property_find(oc, prop_name);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_class_property_add(oc, prop_name, "bool",
                                  x86_cpu_get_bit_prop,
                                  x86_cpu_set_bit_prop,
                                  NULL, fp);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore; they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(xcc, name, w, bitnr);
}

#if !defined(CONFIG_USER_ONLY)
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
#endif /* !CONFIG_USER_ONLY */

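/*
 * Instance init for TYPE_X86_CPU: exposes the "feature-words" and
 * "filtered-features" QOM properties, registers the legacy property-name
 * aliases, and loads the class's CPU model definition, if any.
 */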
obj, "kvm-asyncpf"); 7032 object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int"); 7033 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 7034 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 7035 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 7036 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 7037 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 7038 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 7039 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 7040 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 7041 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 7042 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 7043 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 7044 7045 if (xcc->model) { 7046 x86_cpu_load_model(cpu, xcc->model); 7047 } 7048 } 7049 7050 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7051 { 7052 X86CPU *cpu = X86_CPU(cs); 7053 7054 return cpu->apic_id; 7055 } 7056 7057 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 7058 { 7059 X86CPU *cpu = X86_CPU(cs); 7060 7061 return cpu->env.cr[0] & CR0_PG_MASK; 7062 } 7063 7064 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 7065 { 7066 X86CPU *cpu = X86_CPU(cs); 7067 7068 cpu->env.eip = value; 7069 } 7070 7071 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 7072 { 7073 X86CPU *cpu = X86_CPU(cs); 7074 CPUX86State *env = &cpu->env; 7075 7076 #if !defined(CONFIG_USER_ONLY) 7077 if (interrupt_request & CPU_INTERRUPT_POLL) { 7078 return CPU_INTERRUPT_POLL; 7079 } 7080 #endif 7081 if (interrupt_request & CPU_INTERRUPT_SIPI) { 7082 return CPU_INTERRUPT_SIPI; 7083 } 7084 7085 if (env->hflags2 & HF2_GIF_MASK) { 7086 if ((interrupt_request & CPU_INTERRUPT_SMI) && 7087 !(env->hflags & HF_SMM_MASK)) { 7088 return CPU_INTERRUPT_SMI; 7089 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 7090 !(env->hflags2 & HF2_NMI_MASK)) { 7091 return CPU_INTERRUPT_NMI; 7092 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 7093 return CPU_INTERRUPT_MCE; 7094 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 7095 (((env->hflags2 & HF2_VINTR_MASK) && 7096 (env->hflags2 & HF2_HIF_MASK)) || 7097 (!(env->hflags2 & HF2_VINTR_MASK) && 7098 (env->eflags & IF_MASK && 7099 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 7100 return CPU_INTERRUPT_HARD; 7101 #if !defined(CONFIG_USER_ONLY) 7102 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 7103 (env->eflags & IF_MASK) && 7104 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 7105 return CPU_INTERRUPT_VIRQ; 7106 #endif 7107 } 7108 } 7109 7110 return 0; 7111 } 7112 7113 static bool x86_cpu_has_work(CPUState *cs) 7114 { 7115 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 7116 } 7117 7118 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 7119 { 7120 X86CPU *cpu = X86_CPU(cs); 7121 CPUX86State *env = &cpu->env; 7122 7123 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 7124 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 7125 : bfd_mach_i386_i8086); 7126 info->print_insn = print_insn_i386; 7127 7128 info->cap_arch = CS_ARCH_X86; 7129 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 7130 : env->hflags & HF_CS32_MASK ? 
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

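/*
 * Recompute the cached env->hflags bits (CPL, CR0/CR4-derived flags, LMA
 * and the CS/SS size and ADDSEG hints) from the current segment registers,
 * control registers, EFER and EFLAGS.
 */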
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7219 HYPERV_FEAT_REENLIGHTENMENT, 0), 7220 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7221 HYPERV_FEAT_TLBFLUSH, 0), 7222 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7223 HYPERV_FEAT_EVMCS, 0), 7224 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7225 HYPERV_FEAT_IPI, 0), 7226 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7227 HYPERV_FEAT_STIMER_DIRECT, 0), 7228 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7229 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7230 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7231 7232 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7233 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7234 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7235 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7236 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7237 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7238 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7239 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7240 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7241 UINT32_MAX), 7242 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7243 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7244 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7245 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7246 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7247 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7248 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7249 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7250 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor), 7251 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7252 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7253 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7254 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7255 false), 7256 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7257 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7258 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7259 true), 7260 /* 7261 * lecacy_cache defaults to true unless the CPU model provides its 7262 * own cache information (see x86_cpu_load_def()). 7263 */ 7264 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7265 7266 /* 7267 * From "Requirements for Implementing the Microsoft 7268 * Hypervisor Interface": 7269 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7270 * 7271 * "Starting with Windows Server 2012 and Windows 8, if 7272 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7273 * the hypervisor imposes no specific limit to the number of VPs. 7274 * In this case, Windows Server 2012 guest VMs may use more than 7275 * 64 VPs, up to the maximum supported number of processors applicable 7276 * to the specific Windows version being used." 
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_NOTIFY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
                            hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
                       UINT32_MAX),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

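/*
 * Class init for the abstract TYPE_X86_CPU: wires up the realize/unrealize
 * and reset handlers, the CPUClass hooks, the version/vendor/model-id QOM
 * properties, and one boolean property per named CPUID feature bit.
 */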
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);
    FeatureWord w;

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    device_class_set_props(dc, x86_cpu_properties);

    device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;

#ifdef CONFIG_TCG
    tcg_cpu_common_class_init(cc);
#endif /* CONFIG_TCG */

    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;

#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif /* !CONFIG_USER_ONLY */

    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;

    object_class_property_add(oc, "family", "int",
                              x86_cpuid_version_get_family,
                              x86_cpuid_version_set_family, NULL, NULL);
    object_class_property_add(oc, "model", "int",
                              x86_cpuid_version_get_model,
                              x86_cpuid_version_set_model, NULL, NULL);
    object_class_property_add(oc, "stepping", "int",
                              x86_cpuid_version_get_stepping,
                              x86_cpuid_version_set_stepping, NULL, NULL);
    object_class_property_add_str(oc, "vendor",
                                  x86_cpuid_get_vendor,
                                  x86_cpuid_set_vendor);
    object_class_property_add_str(oc, "model-id",
                                  x86_cpuid_get_model_id,
                                  x86_cpuid_set_model_id);
    object_class_property_add(oc, "tsc-frequency", "int",
                              x86_cpuid_get_tsc_freq,
                              x86_cpuid_set_tsc_freq, NULL, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_class_property_add(oc, "unavailable-features", "strList",
                              x86_cpu_get_unavailable_features,
                              NULL, NULL, NULL);

#if !defined(CONFIG_USER_ONLY)
    object_class_property_add(oc, "crash-information", "GuestPanicInformation",
                              x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
#endif

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;
        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(xcc, w, bitnr);
        }
    }
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};


/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)