1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "tcg/tcg-cpu.h" 28 #include "tcg/helper-tcg.h" 29 #include "exec/exec-all.h" 30 #include "sysemu/kvm.h" 31 #include "sysemu/reset.h" 32 #include "sysemu/hvf.h" 33 #include "sysemu/cpus.h" 34 #include "sysemu/xen.h" 35 #include "sysemu/whpx.h" 36 #include "kvm/kvm_i386.h" 37 #include "sev_i386.h" 38 39 #include "qemu/error-report.h" 40 #include "qemu/module.h" 41 #include "qemu/option.h" 42 #include "qemu/config-file.h" 43 #include "qapi/error.h" 44 #include "qapi/qapi-visit-machine.h" 45 #include "qapi/qapi-visit-run-state.h" 46 #include "qapi/qmp/qdict.h" 47 #include "qapi/qmp/qerror.h" 48 #include "qapi/visitor.h" 49 #include "qom/qom-qobject.h" 50 #include "sysemu/arch_init.h" 51 #include "qapi/qapi-commands-machine-target.h" 52 53 #include "standard-headers/asm-x86/kvm_para.h" 54 55 #include "sysemu/sysemu.h" 56 #include "sysemu/tcg.h" 57 #include "hw/qdev-properties.h" 58 #include "hw/i386/topology.h" 59 #ifndef CONFIG_USER_ONLY 60 #include "exec/address-spaces.h" 61 #include "hw/i386/apic_internal.h" 62 #include "hw/boards.h" 63 #endif 64 65 #include "disas/capstone.h" 66 67 /* Helpers for building CPUID[2] descriptors: */ 68 69 struct CPUID2CacheDescriptorInfo { 70 enum CacheType type; 71 int level; 72 int size; 73 int line_size; 74 int associativity; 75 }; 76 77 /* 78 * Known CPUID 2 cache descriptors. 79 * From Intel SDM Volume 2A, CPUID instruction 80 */ 81 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 82 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 83 .associativity = 4, .line_size = 32, }, 84 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 85 .associativity = 4, .line_size = 32, }, 86 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 87 .associativity = 4, .line_size = 64, }, 88 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 89 .associativity = 2, .line_size = 32, }, 90 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 91 .associativity = 4, .line_size = 32, }, 92 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 93 .associativity = 4, .line_size = 64, }, 94 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 95 .associativity = 6, .line_size = 64, }, 96 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 97 .associativity = 2, .line_size = 64, }, 98 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 99 .associativity = 8, .line_size = 64, }, 100 /* lines per sector is not supported cpuid2_cache_descriptor(), 101 * so descriptors 0x22, 0x23 are not included 102 */ 103 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 104 .associativity = 16, .line_size = 64, }, 105 /* lines per sector is not supported cpuid2_cache_descriptor(), 106 * so descriptors 0x25, 0x20 are not included 107 */ 108 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 109 .associativity = 8, .line_size = 64, }, 110 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 111 .associativity = 8, .line_size = 64, }, 112 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 113 .associativity = 4, .line_size = 32, }, 114 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 115 .associativity = 4, .line_size = 32, }, 116 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 117 .associativity = 4, .line_size = 32, }, 118 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 119 .associativity = 4, .line_size = 32, }, 120 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 121 .associativity = 4, .line_size = 32, }, 122 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 123 .associativity = 4, .line_size = 64, }, 124 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 125 .associativity = 8, .line_size = 64, }, 126 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 129 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 130 .associativity = 12, .line_size = 64, }, 131 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 132 .associativity = 16, .line_size = 64, }, 133 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 134 .associativity = 12, .line_size = 64, }, 135 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 136 .associativity = 16, .line_size = 64, }, 137 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 138 .associativity = 24, .line_size = 64, }, 139 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 140 .associativity = 8, .line_size = 64, }, 141 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 142 .associativity = 4, .line_size = 64, }, 143 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 144 .associativity = 4, .line_size = 64, }, 145 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 146 .associativity = 4, .line_size = 64, }, 147 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 148 .associativity = 4, .line_size = 64, }, 149 /* lines per sector is not supported cpuid2_cache_descriptor(), 150 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 151 */ 152 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 153 .associativity = 8, .line_size = 64, }, 154 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 155 .associativity = 2, .line_size = 64, }, 156 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 157 .associativity = 8, .line_size = 64, }, 158 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 159 .associativity = 8, .line_size = 32, }, 160 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 161 .associativity = 8, .line_size = 32, }, 162 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 163 .associativity = 8, .line_size = 32, }, 164 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 165 .associativity = 8, .line_size = 32, }, 166 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 167 .associativity = 4, .line_size = 64, }, 168 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 169 .associativity = 8, .line_size = 64, }, 170 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 171 .associativity = 4, .line_size = 64, }, 172 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 173 .associativity = 4, .line_size = 64, }, 174 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 175 .associativity = 4, .line_size = 64, }, 176 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 177 .associativity = 8, .line_size = 64, }, 178 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 179 .associativity = 8, .line_size = 64, }, 180 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 181 .associativity = 8, .line_size = 64, }, 182 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 183 .associativity = 12, .line_size = 64, }, 184 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 185 .associativity = 12, .line_size = 64, }, 186 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 187 .associativity = 12, .line_size = 64, }, 188 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 189 .associativity = 16, .line_size = 64, }, 190 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 191 .associativity = 16, .line_size = 64, }, 192 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 193 .associativity = 16, .line_size = 64, }, 194 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 195 .associativity = 24, .line_size = 64, }, 196 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 197 .associativity = 24, .line_size = 64, }, 198 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 199 .associativity = 24, .line_size = 64, }, 200 }; 201 202 /* 203 * "CPUID leaf 2 does not report cache descriptor information, 204 * use CPUID leaf 4 to query cache parameters" 205 */ 206 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 207 208 /* 209 * Return a CPUID 2 cache descriptor for a given cache. 210 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 211 */ 212 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 213 { 214 int i; 215 216 assert(cache->size > 0); 217 assert(cache->level > 0); 218 assert(cache->line_size > 0); 219 assert(cache->associativity > 0); 220 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 221 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 222 if (d->level == cache->level && d->type == cache->type && 223 d->size == cache->size && d->line_size == cache->line_size && 224 d->associativity == cache->associativity) { 225 return i; 226 } 227 } 228 229 return CACHE_DESCRIPTOR_UNAVAILABLE; 230 } 231 232 /* CPUID Leaf 4 constants: */ 233 234 /* EAX: */ 235 #define CACHE_TYPE_D 1 236 #define CACHE_TYPE_I 2 237 #define CACHE_TYPE_UNIFIED 3 238 239 #define CACHE_LEVEL(l) (l << 5) 240 241 #define CACHE_SELF_INIT_LEVEL (1 << 8) 242 243 /* EDX: */ 244 #define CACHE_NO_INVD_SHARING (1 << 0) 245 #define CACHE_INCLUSIVE (1 << 1) 246 #define CACHE_COMPLEX_IDX (1 << 2) 247 248 /* Encode CacheType for CPUID[4].EAX */ 249 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 250 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 251 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 252 0 /* Invalid value */) 253 254 255 /* Encode cache info for CPUID[4] */ 256 static void encode_cache_cpuid4(CPUCacheInfo *cache, 257 int num_apic_ids, int num_cores, 258 uint32_t *eax, uint32_t *ebx, 259 uint32_t *ecx, uint32_t *edx) 260 { 261 assert(cache->size == cache->line_size * cache->associativity * 262 cache->partitions * cache->sets); 263 264 assert(num_apic_ids > 0); 265 *eax = CACHE_TYPE(cache->type) | 266 CACHE_LEVEL(cache->level) | 267 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 268 ((num_cores - 1) << 26) | 269 ((num_apic_ids - 1) << 14); 270 271 assert(cache->line_size > 0); 272 assert(cache->partitions > 0); 273 assert(cache->associativity > 0); 274 /* We don't implement fully-associative caches */ 275 assert(cache->associativity < cache->sets); 276 *ebx = (cache->line_size - 1) | 277 ((cache->partitions - 1) << 12) | 278 ((cache->associativity - 1) << 22); 279 280 assert(cache->sets > 0); 281 *ecx = cache->sets - 1; 282 283 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 284 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 285 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 286 } 287 288 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 289 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 290 { 291 assert(cache->size % 1024 == 0); 292 assert(cache->lines_per_tag > 0); 293 assert(cache->associativity > 0); 294 assert(cache->line_size > 0); 295 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 296 (cache->lines_per_tag << 8) | (cache->line_size); 297 } 298 299 #define ASSOC_FULL 0xFF 300 301 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 302 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 303 a == 2 ? 0x2 : \ 304 a == 4 ? 0x4 : \ 305 a == 8 ? 0x6 : \ 306 a == 16 ? 0x8 : \ 307 a == 32 ? 0xA : \ 308 a == 48 ? 0xB : \ 309 a == 64 ? 0xC : \ 310 a == 96 ? 0xD : \ 311 a == 128 ? 0xE : \ 312 a == ASSOC_FULL ? 0xF : \ 313 0 /* invalid value */) 314 315 /* 316 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 317 * @l3 can be NULL. 318 */ 319 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 320 CPUCacheInfo *l3, 321 uint32_t *ecx, uint32_t *edx) 322 { 323 assert(l2->size % 1024 == 0); 324 assert(l2->associativity > 0); 325 assert(l2->lines_per_tag > 0); 326 assert(l2->line_size > 0); 327 *ecx = ((l2->size / 1024) << 16) | 328 (AMD_ENC_ASSOC(l2->associativity) << 12) | 329 (l2->lines_per_tag << 8) | (l2->line_size); 330 331 if (l3) { 332 assert(l3->size % (512 * 1024) == 0); 333 assert(l3->associativity > 0); 334 assert(l3->lines_per_tag > 0); 335 assert(l3->line_size > 0); 336 *edx = ((l3->size / (512 * 1024)) << 18) | 337 (AMD_ENC_ASSOC(l3->associativity) << 12) | 338 (l3->lines_per_tag << 8) | (l3->line_size); 339 } else { 340 *edx = 0; 341 } 342 } 343 344 /* Encode cache info for CPUID[8000001D] */ 345 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, 346 X86CPUTopoInfo *topo_info, 347 uint32_t *eax, uint32_t *ebx, 348 uint32_t *ecx, uint32_t *edx) 349 { 350 uint32_t l3_threads; 351 assert(cache->size == cache->line_size * cache->associativity * 352 cache->partitions * cache->sets); 353 354 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 355 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 356 357 /* L3 is shared among multiple cores */ 358 if (cache->level == 3) { 359 l3_threads = topo_info->cores_per_die * topo_info->threads_per_core; 360 *eax |= (l3_threads - 1) << 14; 361 } else { 362 *eax |= ((topo_info->threads_per_core - 1) << 14); 363 } 364 365 assert(cache->line_size > 0); 366 assert(cache->partitions > 0); 367 assert(cache->associativity > 0); 368 /* We don't implement fully-associative caches */ 369 assert(cache->associativity < cache->sets); 370 *ebx = (cache->line_size - 1) | 371 ((cache->partitions - 1) << 12) | 372 ((cache->associativity - 1) << 22); 373 374 assert(cache->sets > 0); 375 *ecx = cache->sets - 1; 376 377 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 378 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 379 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 380 } 381 382 /* Encode cache info for CPUID[8000001E] */ 383 static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info, 384 uint32_t *eax, uint32_t *ebx, 385 uint32_t *ecx, uint32_t *edx) 386 { 387 X86CPUTopoIDs topo_ids; 388 389 x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids); 390 391 *eax = cpu->apic_id; 392 393 /* 394 * CPUID_Fn8000001E_EBX [Core Identifiers] (CoreId) 395 * Read-only. Reset: 0000_XXXXh. 396 * See Core::X86::Cpuid::ExtApicId. 397 * Core::X86::Cpuid::CoreId_lthree[1:0]_core[3:0]_thread[1:0]; 398 * Bits Description 399 * 31:16 Reserved. 400 * 15:8 ThreadsPerCore: threads per core. Read-only. Reset: XXh. 401 * The number of threads per core is ThreadsPerCore+1. 402 * 7:0 CoreId: core ID. Read-only. Reset: XXh. 403 * 404 * NOTE: CoreId is already part of apic_id. Just use it. We can 405 * use all the 8 bits to represent the core_id here. 406 */ 407 *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.core_id & 0xFF); 408 409 /* 410 * CPUID_Fn8000001E_ECX [Node Identifiers] (NodeId) 411 * Read-only. Reset: 0000_0XXXh. 412 * Core::X86::Cpuid::NodeId_lthree[1:0]_core[3:0]_thread[1:0]; 413 * Bits Description 414 * 31:11 Reserved. 415 * 10:8 NodesPerProcessor: Node per processor. Read-only. Reset: XXXb. 416 * ValidValues: 417 * Value Description 418 * 000b 1 node per processor. 419 * 001b 2 nodes per processor. 420 * 010b Reserved. 421 * 011b 4 nodes per processor. 422 * 111b-100b Reserved. 423 * 7:0 NodeId: Node ID. Read-only. Reset: XXh. 424 * 425 * NOTE: Hardware reserves 3 bits for number of nodes per processor. 426 * But users can create more nodes than the actual hardware can 427 * support. To genaralize we can use all the upper 8 bits for nodes. 428 * NodeId is combination of node and socket_id which is already decoded 429 * in apic_id. Just use it by shifting. 430 */ 431 *ecx = ((topo_info->dies_per_pkg - 1) << 8) | 432 ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF); 433 434 *edx = 0; 435 } 436 437 /* 438 * Definitions of the hardcoded cache entries we expose: 439 * These are legacy cache values. If there is a need to change any 440 * of these values please use builtin_x86_defs 441 */ 442 443 /* L1 data cache: */ 444 static CPUCacheInfo legacy_l1d_cache = { 445 .type = DATA_CACHE, 446 .level = 1, 447 .size = 32 * KiB, 448 .self_init = 1, 449 .line_size = 64, 450 .associativity = 8, 451 .sets = 64, 452 .partitions = 1, 453 .no_invd_sharing = true, 454 }; 455 456 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 457 static CPUCacheInfo legacy_l1d_cache_amd = { 458 .type = DATA_CACHE, 459 .level = 1, 460 .size = 64 * KiB, 461 .self_init = 1, 462 .line_size = 64, 463 .associativity = 2, 464 .sets = 512, 465 .partitions = 1, 466 .lines_per_tag = 1, 467 .no_invd_sharing = true, 468 }; 469 470 /* L1 instruction cache: */ 471 static CPUCacheInfo legacy_l1i_cache = { 472 .type = INSTRUCTION_CACHE, 473 .level = 1, 474 .size = 32 * KiB, 475 .self_init = 1, 476 .line_size = 64, 477 .associativity = 8, 478 .sets = 64, 479 .partitions = 1, 480 .no_invd_sharing = true, 481 }; 482 483 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 484 static CPUCacheInfo legacy_l1i_cache_amd = { 485 .type = INSTRUCTION_CACHE, 486 .level = 1, 487 .size = 64 * KiB, 488 .self_init = 1, 489 .line_size = 64, 490 .associativity = 2, 491 .sets = 512, 492 .partitions = 1, 493 .lines_per_tag = 1, 494 .no_invd_sharing = true, 495 }; 496 497 /* Level 2 unified cache: */ 498 static CPUCacheInfo legacy_l2_cache = { 499 .type = UNIFIED_CACHE, 500 .level = 2, 501 .size = 4 * MiB, 502 .self_init = 1, 503 .line_size = 64, 504 .associativity = 16, 505 .sets = 4096, 506 .partitions = 1, 507 .no_invd_sharing = true, 508 }; 509 510 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 511 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 512 .type = UNIFIED_CACHE, 513 .level = 2, 514 .size = 2 * MiB, 515 .line_size = 64, 516 .associativity = 8, 517 }; 518 519 520 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 521 static CPUCacheInfo legacy_l2_cache_amd = { 522 .type = UNIFIED_CACHE, 523 .level = 2, 524 .size = 512 * KiB, 525 .line_size = 64, 526 .lines_per_tag = 1, 527 .associativity = 16, 528 .sets = 512, 529 .partitions = 1, 530 }; 531 532 /* Level 3 unified cache: */ 533 static CPUCacheInfo legacy_l3_cache = { 534 .type = UNIFIED_CACHE, 535 .level = 3, 536 .size = 16 * MiB, 537 .line_size = 64, 538 .associativity = 16, 539 .sets = 16384, 540 .partitions = 1, 541 .lines_per_tag = 1, 542 .self_init = true, 543 .inclusive = true, 544 .complex_indexing = true, 545 }; 546 547 /* TLB definitions: */ 548 549 #define L1_DTLB_2M_ASSOC 1 550 #define L1_DTLB_2M_ENTRIES 255 551 #define L1_DTLB_4K_ASSOC 1 552 #define L1_DTLB_4K_ENTRIES 255 553 554 #define L1_ITLB_2M_ASSOC 1 555 #define L1_ITLB_2M_ENTRIES 255 556 #define L1_ITLB_4K_ASSOC 1 557 #define L1_ITLB_4K_ENTRIES 255 558 559 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 560 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 561 #define L2_DTLB_4K_ASSOC 4 562 #define L2_DTLB_4K_ENTRIES 512 563 564 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 565 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 566 #define L2_ITLB_4K_ASSOC 4 567 #define L2_ITLB_4K_ENTRIES 512 568 569 /* CPUID Leaf 0x14 constants: */ 570 #define INTEL_PT_MAX_SUBLEAF 0x1 571 /* 572 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 573 * MSR can be accessed; 574 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 575 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 576 * of Intel PT MSRs across warm reset; 577 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 578 */ 579 #define INTEL_PT_MINIMAL_EBX 0xf 580 /* 581 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 582 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 583 * accessed; 584 * bit[01]: ToPA tables can hold any number of output entries, up to the 585 * maximum allowed by the MaskOrTableOffset field of 586 * IA32_RTIT_OUTPUT_MASK_PTRS; 587 * bit[02]: Support Single-Range Output scheme; 588 */ 589 #define INTEL_PT_MINIMAL_ECX 0x7 590 /* generated packets which contain IP payloads have LIP values */ 591 #define INTEL_PT_IP_LIP (1 << 31) 592 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 593 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 594 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 595 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 596 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 597 598 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 599 uint32_t vendor2, uint32_t vendor3) 600 { 601 int i; 602 for (i = 0; i < 4; i++) { 603 dst[i] = vendor1 >> (8 * i); 604 dst[i + 4] = vendor2 >> (8 * i); 605 dst[i + 8] = vendor3 >> (8 * i); 606 } 607 dst[CPUID_VENDOR_SZ] = '\0'; 608 } 609 610 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 611 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 612 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 613 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 614 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 615 CPUID_PSE36 | CPUID_FXSR) 616 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 617 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 618 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 619 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 620 CPUID_PAE | CPUID_SEP | CPUID_APIC) 621 622 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 623 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 624 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 625 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 626 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 627 /* partly implemented: 628 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 629 /* missing: 630 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 631 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 632 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 633 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 634 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 635 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 636 CPUID_EXT_RDRAND) 637 /* missing: 638 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 639 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 640 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 641 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 642 CPUID_EXT_F16C */ 643 644 #ifdef TARGET_X86_64 645 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 646 #else 647 #define TCG_EXT2_X86_64_FEATURES 0 648 #endif 649 650 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 651 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 652 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 653 TCG_EXT2_X86_64_FEATURES) 654 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 655 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 656 #define TCG_EXT4_FEATURES 0 657 #define TCG_SVM_FEATURES CPUID_SVM_NPT 658 #define TCG_KVM_FEATURES 0 659 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 660 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 661 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 662 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 663 CPUID_7_0_EBX_ERMS) 664 /* missing: 665 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 666 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 667 CPUID_7_0_EBX_RDSEED */ 668 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 669 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 670 CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS) 671 #define TCG_7_0_EDX_FEATURES 0 672 #define TCG_7_1_EAX_FEATURES 0 673 #define TCG_APM_FEATURES 0 674 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 675 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 676 /* missing: 677 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 678 #define TCG_14_0_ECX_FEATURES 0 679 680 typedef enum FeatureWordType { 681 CPUID_FEATURE_WORD, 682 MSR_FEATURE_WORD, 683 } FeatureWordType; 684 685 typedef struct FeatureWordInfo { 686 FeatureWordType type; 687 /* feature flags names are taken from "Intel Processor Identification and 688 * the CPUID Instruction" and AMD's "CPUID Specification". 689 * In cases of disagreement between feature naming conventions, 690 * aliases may be added. 691 */ 692 const char *feat_names[64]; 693 union { 694 /* If type==CPUID_FEATURE_WORD */ 695 struct { 696 uint32_t eax; /* Input EAX for CPUID */ 697 bool needs_ecx; /* CPUID instruction uses ECX as input */ 698 uint32_t ecx; /* Input ECX value for CPUID */ 699 int reg; /* output register (R_* constant) */ 700 } cpuid; 701 /* If type==MSR_FEATURE_WORD */ 702 struct { 703 uint32_t index; 704 } msr; 705 }; 706 uint64_t tcg_features; /* Feature flags supported by TCG */ 707 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ 708 uint64_t migratable_flags; /* Feature flags known to be migratable */ 709 /* Features that shouldn't be auto-enabled by "-cpu host" */ 710 uint64_t no_autoenable_flags; 711 } FeatureWordInfo; 712 713 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 714 [FEAT_1_EDX] = { 715 .type = CPUID_FEATURE_WORD, 716 .feat_names = { 717 "fpu", "vme", "de", "pse", 718 "tsc", "msr", "pae", "mce", 719 "cx8", "apic", NULL, "sep", 720 "mtrr", "pge", "mca", "cmov", 721 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 722 NULL, "ds" /* Intel dts */, "acpi", "mmx", 723 "fxsr", "sse", "sse2", "ss", 724 "ht" /* Intel htt */, "tm", "ia64", "pbe", 725 }, 726 .cpuid = {.eax = 1, .reg = R_EDX, }, 727 .tcg_features = TCG_FEATURES, 728 }, 729 [FEAT_1_ECX] = { 730 .type = CPUID_FEATURE_WORD, 731 .feat_names = { 732 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 733 "ds-cpl", "vmx", "smx", "est", 734 "tm2", "ssse3", "cid", NULL, 735 "fma", "cx16", "xtpr", "pdcm", 736 NULL, "pcid", "dca", "sse4.1", 737 "sse4.2", "x2apic", "movbe", "popcnt", 738 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 739 "avx", "f16c", "rdrand", "hypervisor", 740 }, 741 .cpuid = { .eax = 1, .reg = R_ECX, }, 742 .tcg_features = TCG_EXT_FEATURES, 743 }, 744 /* Feature names that are already defined on feature_name[] but 745 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 746 * names on feat_names below. They are copied automatically 747 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 748 */ 749 [FEAT_8000_0001_EDX] = { 750 .type = CPUID_FEATURE_WORD, 751 .feat_names = { 752 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 753 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 754 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 755 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 756 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 757 "nx", NULL, "mmxext", NULL /* mmx */, 758 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 759 NULL, "lm", "3dnowext", "3dnow", 760 }, 761 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 762 .tcg_features = TCG_EXT2_FEATURES, 763 }, 764 [FEAT_8000_0001_ECX] = { 765 .type = CPUID_FEATURE_WORD, 766 .feat_names = { 767 "lahf-lm", "cmp-legacy", "svm", "extapic", 768 "cr8legacy", "abm", "sse4a", "misalignsse", 769 "3dnowprefetch", "osvw", "ibs", "xop", 770 "skinit", "wdt", NULL, "lwp", 771 "fma4", "tce", NULL, "nodeid-msr", 772 NULL, "tbm", "topoext", "perfctr-core", 773 "perfctr-nb", NULL, NULL, NULL, 774 NULL, NULL, NULL, NULL, 775 }, 776 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 777 .tcg_features = TCG_EXT3_FEATURES, 778 /* 779 * TOPOEXT is always allowed but can't be enabled blindly by 780 * "-cpu host", as it requires consistent cache topology info 781 * to be provided so it doesn't confuse guests. 782 */ 783 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 784 }, 785 [FEAT_C000_0001_EDX] = { 786 .type = CPUID_FEATURE_WORD, 787 .feat_names = { 788 NULL, NULL, "xstore", "xstore-en", 789 NULL, NULL, "xcrypt", "xcrypt-en", 790 "ace2", "ace2-en", "phe", "phe-en", 791 "pmm", "pmm-en", NULL, NULL, 792 NULL, NULL, NULL, NULL, 793 NULL, NULL, NULL, NULL, 794 NULL, NULL, NULL, NULL, 795 NULL, NULL, NULL, NULL, 796 }, 797 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 798 .tcg_features = TCG_EXT4_FEATURES, 799 }, 800 [FEAT_KVM] = { 801 .type = CPUID_FEATURE_WORD, 802 .feat_names = { 803 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 804 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 805 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 806 "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", "kvm-msi-ext-dest-id", 807 NULL, NULL, NULL, NULL, 808 NULL, NULL, NULL, NULL, 809 "kvmclock-stable-bit", NULL, NULL, NULL, 810 NULL, NULL, NULL, NULL, 811 }, 812 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 813 .tcg_features = TCG_KVM_FEATURES, 814 }, 815 [FEAT_KVM_HINTS] = { 816 .type = CPUID_FEATURE_WORD, 817 .feat_names = { 818 "kvm-hint-dedicated", NULL, NULL, NULL, 819 NULL, NULL, NULL, NULL, 820 NULL, NULL, NULL, NULL, 821 NULL, NULL, NULL, NULL, 822 NULL, NULL, NULL, NULL, 823 NULL, NULL, NULL, NULL, 824 NULL, NULL, NULL, NULL, 825 NULL, NULL, NULL, NULL, 826 }, 827 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 828 .tcg_features = TCG_KVM_FEATURES, 829 /* 830 * KVM hints aren't auto-enabled by -cpu host, they need to be 831 * explicitly enabled in the command-line. 832 */ 833 .no_autoenable_flags = ~0U, 834 }, 835 /* 836 * .feat_names are commented out for Hyper-V enlightenments because we 837 * don't want to have two different ways for enabling them on QEMU command 838 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require 839 * enabling several feature bits simultaneously, exposing these bits 840 * individually may just confuse guests. 841 */ 842 [FEAT_HYPERV_EAX] = { 843 .type = CPUID_FEATURE_WORD, 844 .feat_names = { 845 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 846 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 847 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 848 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 849 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 850 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 851 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 852 NULL, NULL, 853 NULL, NULL, NULL, NULL, 854 NULL, NULL, NULL, NULL, 855 NULL, NULL, NULL, NULL, 856 NULL, NULL, NULL, NULL, 857 }, 858 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 859 }, 860 [FEAT_HYPERV_EBX] = { 861 .type = CPUID_FEATURE_WORD, 862 .feat_names = { 863 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 864 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 865 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 866 NULL /* hv_create_port */, NULL /* hv_connect_port */, 867 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 868 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 869 NULL, NULL, 870 NULL, NULL, NULL, NULL, 871 NULL, NULL, NULL, NULL, 872 NULL, NULL, NULL, NULL, 873 NULL, NULL, NULL, NULL, 874 }, 875 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 876 }, 877 [FEAT_HYPERV_EDX] = { 878 .type = CPUID_FEATURE_WORD, 879 .feat_names = { 880 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 881 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 882 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 883 NULL, NULL, 884 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 885 NULL, NULL, NULL, NULL, 886 NULL, NULL, NULL, NULL, 887 NULL, NULL, NULL, NULL, 888 NULL, NULL, NULL, NULL, 889 NULL, NULL, NULL, NULL, 890 }, 891 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 892 }, 893 [FEAT_HV_RECOMM_EAX] = { 894 .type = CPUID_FEATURE_WORD, 895 .feat_names = { 896 NULL /* hv_recommend_pv_as_switch */, 897 NULL /* hv_recommend_pv_tlbflush_local */, 898 NULL /* hv_recommend_pv_tlbflush_remote */, 899 NULL /* hv_recommend_msr_apic_access */, 900 NULL /* hv_recommend_msr_reset */, 901 NULL /* hv_recommend_relaxed_timing */, 902 NULL /* hv_recommend_dma_remapping */, 903 NULL /* hv_recommend_int_remapping */, 904 NULL /* hv_recommend_x2apic_msrs */, 905 NULL /* hv_recommend_autoeoi_deprecation */, 906 NULL /* hv_recommend_pv_ipi */, 907 NULL /* hv_recommend_ex_hypercalls */, 908 NULL /* hv_hypervisor_is_nested */, 909 NULL /* hv_recommend_int_mbec */, 910 NULL /* hv_recommend_evmcs */, 911 NULL, 912 NULL, NULL, NULL, NULL, 913 NULL, NULL, NULL, NULL, 914 NULL, NULL, NULL, NULL, 915 NULL, NULL, NULL, NULL, 916 }, 917 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 918 }, 919 [FEAT_HV_NESTED_EAX] = { 920 .type = CPUID_FEATURE_WORD, 921 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 922 }, 923 [FEAT_SVM] = { 924 .type = CPUID_FEATURE_WORD, 925 .feat_names = { 926 "npt", "lbrv", "svm-lock", "nrip-save", 927 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 928 NULL, NULL, "pause-filter", NULL, 929 "pfthreshold", "avic", NULL, "v-vmsave-vmload", 930 "vgif", NULL, NULL, NULL, 931 NULL, NULL, NULL, NULL, 932 NULL, NULL, NULL, NULL, 933 "svme-addr-chk", NULL, NULL, NULL, 934 }, 935 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 936 .tcg_features = TCG_SVM_FEATURES, 937 }, 938 [FEAT_7_0_EBX] = { 939 .type = CPUID_FEATURE_WORD, 940 .feat_names = { 941 "fsgsbase", "tsc-adjust", NULL, "bmi1", 942 "hle", "avx2", NULL, "smep", 943 "bmi2", "erms", "invpcid", "rtm", 944 NULL, NULL, "mpx", NULL, 945 "avx512f", "avx512dq", "rdseed", "adx", 946 "smap", "avx512ifma", "pcommit", "clflushopt", 947 "clwb", "intel-pt", "avx512pf", "avx512er", 948 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 949 }, 950 .cpuid = { 951 .eax = 7, 952 .needs_ecx = true, .ecx = 0, 953 .reg = R_EBX, 954 }, 955 .tcg_features = TCG_7_0_EBX_FEATURES, 956 }, 957 [FEAT_7_0_ECX] = { 958 .type = CPUID_FEATURE_WORD, 959 .feat_names = { 960 NULL, "avx512vbmi", "umip", "pku", 961 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, 962 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 963 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 964 "la57", NULL, NULL, NULL, 965 NULL, NULL, "rdpid", NULL, 966 NULL, "cldemote", NULL, "movdiri", 967 "movdir64b", NULL, NULL, "pks", 968 }, 969 .cpuid = { 970 .eax = 7, 971 .needs_ecx = true, .ecx = 0, 972 .reg = R_ECX, 973 }, 974 .tcg_features = TCG_7_0_ECX_FEATURES, 975 }, 976 [FEAT_7_0_EDX] = { 977 .type = CPUID_FEATURE_WORD, 978 .feat_names = { 979 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 980 "fsrm", NULL, NULL, NULL, 981 "avx512-vp2intersect", NULL, "md-clear", NULL, 982 NULL, NULL, "serialize", NULL, 983 "tsx-ldtrk", NULL, NULL /* pconfig */, NULL, 984 NULL, NULL, NULL, "avx512-fp16", 985 NULL, NULL, "spec-ctrl", "stibp", 986 NULL, "arch-capabilities", "core-capability", "ssbd", 987 }, 988 .cpuid = { 989 .eax = 7, 990 .needs_ecx = true, .ecx = 0, 991 .reg = R_EDX, 992 }, 993 .tcg_features = TCG_7_0_EDX_FEATURES, 994 }, 995 [FEAT_7_1_EAX] = { 996 .type = CPUID_FEATURE_WORD, 997 .feat_names = { 998 NULL, NULL, NULL, NULL, 999 NULL, "avx512-bf16", NULL, NULL, 1000 NULL, NULL, NULL, NULL, 1001 NULL, NULL, NULL, NULL, 1002 NULL, NULL, NULL, NULL, 1003 NULL, NULL, NULL, NULL, 1004 NULL, NULL, NULL, NULL, 1005 NULL, NULL, NULL, NULL, 1006 }, 1007 .cpuid = { 1008 .eax = 7, 1009 .needs_ecx = true, .ecx = 1, 1010 .reg = R_EAX, 1011 }, 1012 .tcg_features = TCG_7_1_EAX_FEATURES, 1013 }, 1014 [FEAT_8000_0007_EDX] = { 1015 .type = CPUID_FEATURE_WORD, 1016 .feat_names = { 1017 NULL, NULL, NULL, NULL, 1018 NULL, NULL, NULL, NULL, 1019 "invtsc", NULL, NULL, NULL, 1020 NULL, NULL, NULL, NULL, 1021 NULL, NULL, NULL, NULL, 1022 NULL, NULL, NULL, NULL, 1023 NULL, NULL, NULL, NULL, 1024 NULL, NULL, NULL, NULL, 1025 }, 1026 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1027 .tcg_features = TCG_APM_FEATURES, 1028 .unmigratable_flags = CPUID_APM_INVTSC, 1029 }, 1030 [FEAT_8000_0008_EBX] = { 1031 .type = CPUID_FEATURE_WORD, 1032 .feat_names = { 1033 "clzero", NULL, "xsaveerptr", NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, "wbnoinvd", NULL, NULL, 1036 "ibpb", NULL, NULL, "amd-stibp", 1037 NULL, NULL, NULL, NULL, 1038 NULL, NULL, NULL, NULL, 1039 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1040 NULL, NULL, NULL, NULL, 1041 }, 1042 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1043 .tcg_features = 0, 1044 .unmigratable_flags = 0, 1045 }, 1046 [FEAT_XSAVE] = { 1047 .type = CPUID_FEATURE_WORD, 1048 .feat_names = { 1049 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1050 NULL, NULL, NULL, NULL, 1051 NULL, NULL, NULL, NULL, 1052 NULL, NULL, NULL, NULL, 1053 NULL, NULL, NULL, NULL, 1054 NULL, NULL, NULL, NULL, 1055 NULL, NULL, NULL, NULL, 1056 NULL, NULL, NULL, NULL, 1057 }, 1058 .cpuid = { 1059 .eax = 0xd, 1060 .needs_ecx = true, .ecx = 1, 1061 .reg = R_EAX, 1062 }, 1063 .tcg_features = TCG_XSAVE_FEATURES, 1064 }, 1065 [FEAT_6_EAX] = { 1066 .type = CPUID_FEATURE_WORD, 1067 .feat_names = { 1068 NULL, NULL, "arat", NULL, 1069 NULL, NULL, NULL, NULL, 1070 NULL, NULL, NULL, NULL, 1071 NULL, NULL, NULL, NULL, 1072 NULL, NULL, NULL, NULL, 1073 NULL, NULL, NULL, NULL, 1074 NULL, NULL, NULL, NULL, 1075 NULL, NULL, NULL, NULL, 1076 }, 1077 .cpuid = { .eax = 6, .reg = R_EAX, }, 1078 .tcg_features = TCG_6_EAX_FEATURES, 1079 }, 1080 [FEAT_XSAVE_COMP_LO] = { 1081 .type = CPUID_FEATURE_WORD, 1082 .cpuid = { 1083 .eax = 0xD, 1084 .needs_ecx = true, .ecx = 0, 1085 .reg = R_EAX, 1086 }, 1087 .tcg_features = ~0U, 1088 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1089 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1090 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1091 XSTATE_PKRU_MASK, 1092 }, 1093 [FEAT_XSAVE_COMP_HI] = { 1094 .type = CPUID_FEATURE_WORD, 1095 .cpuid = { 1096 .eax = 0xD, 1097 .needs_ecx = true, .ecx = 0, 1098 .reg = R_EDX, 1099 }, 1100 .tcg_features = ~0U, 1101 }, 1102 /*Below are MSR exposed features*/ 1103 [FEAT_ARCH_CAPABILITIES] = { 1104 .type = MSR_FEATURE_WORD, 1105 .feat_names = { 1106 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1107 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", 1108 "taa-no", NULL, NULL, NULL, 1109 NULL, NULL, NULL, NULL, 1110 NULL, NULL, NULL, NULL, 1111 NULL, NULL, NULL, NULL, 1112 NULL, NULL, NULL, NULL, 1113 NULL, NULL, NULL, NULL, 1114 }, 1115 .msr = { 1116 .index = MSR_IA32_ARCH_CAPABILITIES, 1117 }, 1118 }, 1119 [FEAT_CORE_CAPABILITY] = { 1120 .type = MSR_FEATURE_WORD, 1121 .feat_names = { 1122 NULL, NULL, NULL, NULL, 1123 NULL, "split-lock-detect", NULL, NULL, 1124 NULL, NULL, NULL, NULL, 1125 NULL, NULL, NULL, NULL, 1126 NULL, NULL, NULL, NULL, 1127 NULL, NULL, NULL, NULL, 1128 NULL, NULL, NULL, NULL, 1129 NULL, NULL, NULL, NULL, 1130 }, 1131 .msr = { 1132 .index = MSR_IA32_CORE_CAPABILITY, 1133 }, 1134 }, 1135 [FEAT_PERF_CAPABILITIES] = { 1136 .type = MSR_FEATURE_WORD, 1137 .feat_names = { 1138 NULL, NULL, NULL, NULL, 1139 NULL, NULL, NULL, NULL, 1140 NULL, NULL, NULL, NULL, 1141 NULL, "full-width-write", NULL, NULL, 1142 NULL, NULL, NULL, NULL, 1143 NULL, NULL, NULL, NULL, 1144 NULL, NULL, NULL, NULL, 1145 NULL, NULL, NULL, NULL, 1146 }, 1147 .msr = { 1148 .index = MSR_IA32_PERF_CAPABILITIES, 1149 }, 1150 }, 1151 1152 [FEAT_VMX_PROCBASED_CTLS] = { 1153 .type = MSR_FEATURE_WORD, 1154 .feat_names = { 1155 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", 1156 NULL, NULL, NULL, "vmx-hlt-exit", 1157 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", 1158 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", 1159 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", 1160 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", 1161 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", 1162 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", 1163 }, 1164 .msr = { 1165 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1166 } 1167 }, 1168 1169 [FEAT_VMX_SECONDARY_CTLS] = { 1170 .type = MSR_FEATURE_WORD, 1171 .feat_names = { 1172 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", 1173 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", 1174 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", 1175 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", 1176 "vmx-rdseed-exit", "vmx-pml", NULL, NULL, 1177 "vmx-xsaves", NULL, NULL, NULL, 1178 NULL, NULL, NULL, NULL, 1179 NULL, NULL, NULL, NULL, 1180 }, 1181 .msr = { 1182 .index = MSR_IA32_VMX_PROCBASED_CTLS2, 1183 } 1184 }, 1185 1186 [FEAT_VMX_PINBASED_CTLS] = { 1187 .type = MSR_FEATURE_WORD, 1188 .feat_names = { 1189 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", 1190 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", 1191 NULL, NULL, NULL, NULL, 1192 NULL, NULL, NULL, NULL, 1193 NULL, NULL, NULL, NULL, 1194 NULL, NULL, NULL, NULL, 1195 NULL, NULL, NULL, NULL, 1196 NULL, NULL, NULL, NULL, 1197 }, 1198 .msr = { 1199 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1200 } 1201 }, 1202 1203 [FEAT_VMX_EXIT_CTLS] = { 1204 .type = MSR_FEATURE_WORD, 1205 /* 1206 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from 1207 * the LM CPUID bit. 1208 */ 1209 .feat_names = { 1210 NULL, NULL, "vmx-exit-nosave-debugctl", NULL, 1211 NULL, NULL, NULL, NULL, 1212 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, 1213 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", 1214 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", 1215 "vmx-exit-save-efer", "vmx-exit-load-efer", 1216 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", 1217 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, 1218 NULL, "vmx-exit-load-pkrs", NULL, NULL, 1219 }, 1220 .msr = { 1221 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, 1222 } 1223 }, 1224 1225 [FEAT_VMX_ENTRY_CTLS] = { 1226 .type = MSR_FEATURE_WORD, 1227 .feat_names = { 1228 NULL, NULL, "vmx-entry-noload-debugctl", NULL, 1229 NULL, NULL, NULL, NULL, 1230 NULL, "vmx-entry-ia32e-mode", NULL, NULL, 1231 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", 1232 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, 1233 NULL, NULL, "vmx-entry-load-pkrs", NULL, 1234 NULL, NULL, NULL, NULL, 1235 NULL, NULL, NULL, NULL, 1236 }, 1237 .msr = { 1238 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1239 } 1240 }, 1241 1242 [FEAT_VMX_MISC] = { 1243 .type = MSR_FEATURE_WORD, 1244 .feat_names = { 1245 NULL, NULL, NULL, NULL, 1246 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", 1247 "vmx-activity-wait-sipi", NULL, NULL, NULL, 1248 NULL, NULL, NULL, NULL, 1249 NULL, NULL, NULL, NULL, 1250 NULL, NULL, NULL, NULL, 1251 NULL, NULL, NULL, NULL, 1252 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, 1253 }, 1254 .msr = { 1255 .index = MSR_IA32_VMX_MISC, 1256 } 1257 }, 1258 1259 [FEAT_VMX_EPT_VPID_CAPS] = { 1260 .type = MSR_FEATURE_WORD, 1261 .feat_names = { 1262 "vmx-ept-execonly", NULL, NULL, NULL, 1263 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", 1264 NULL, NULL, NULL, NULL, 1265 NULL, NULL, NULL, NULL, 1266 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, 1267 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, 1268 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, 1269 NULL, NULL, NULL, NULL, 1270 "vmx-invvpid", NULL, NULL, NULL, 1271 NULL, NULL, NULL, NULL, 1272 "vmx-invvpid-single-addr", "vmx-invept-single-context", 1273 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", 1274 NULL, NULL, NULL, NULL, 1275 NULL, NULL, NULL, NULL, 1276 NULL, NULL, NULL, NULL, 1277 NULL, NULL, NULL, NULL, 1278 NULL, NULL, NULL, NULL, 1279 }, 1280 .msr = { 1281 .index = MSR_IA32_VMX_EPT_VPID_CAP, 1282 } 1283 }, 1284 1285 [FEAT_VMX_BASIC] = { 1286 .type = MSR_FEATURE_WORD, 1287 .feat_names = { 1288 [54] = "vmx-ins-outs", 1289 [55] = "vmx-true-ctls", 1290 }, 1291 .msr = { 1292 .index = MSR_IA32_VMX_BASIC, 1293 }, 1294 /* Just to be safe - we don't support setting the MSEG version field. */ 1295 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, 1296 }, 1297 1298 [FEAT_VMX_VMFUNC] = { 1299 .type = MSR_FEATURE_WORD, 1300 .feat_names = { 1301 [0] = "vmx-eptp-switching", 1302 }, 1303 .msr = { 1304 .index = MSR_IA32_VMX_VMFUNC, 1305 } 1306 }, 1307 1308 [FEAT_14_0_ECX] = { 1309 .type = CPUID_FEATURE_WORD, 1310 .feat_names = { 1311 NULL, NULL, NULL, NULL, 1312 NULL, NULL, NULL, NULL, 1313 NULL, NULL, NULL, NULL, 1314 NULL, NULL, NULL, NULL, 1315 NULL, NULL, NULL, NULL, 1316 NULL, NULL, NULL, NULL, 1317 NULL, NULL, NULL, NULL, 1318 NULL, NULL, NULL, "intel-pt-lip", 1319 }, 1320 .cpuid = { 1321 .eax = 0x14, 1322 .needs_ecx = true, .ecx = 0, 1323 .reg = R_ECX, 1324 }, 1325 .tcg_features = TCG_14_0_ECX_FEATURES, 1326 }, 1327 1328 }; 1329 1330 typedef struct FeatureMask { 1331 FeatureWord index; 1332 uint64_t mask; 1333 } FeatureMask; 1334 1335 typedef struct FeatureDep { 1336 FeatureMask from, to; 1337 } FeatureDep; 1338 1339 static FeatureDep feature_dependencies[] = { 1340 { 1341 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES }, 1342 .to = { FEAT_ARCH_CAPABILITIES, ~0ull }, 1343 }, 1344 { 1345 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY }, 1346 .to = { FEAT_CORE_CAPABILITY, ~0ull }, 1347 }, 1348 { 1349 .from = { FEAT_1_ECX, CPUID_EXT_PDCM }, 1350 .to = { FEAT_PERF_CAPABILITIES, ~0ull }, 1351 }, 1352 { 1353 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1354 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull }, 1355 }, 1356 { 1357 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1358 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull }, 1359 }, 1360 { 1361 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1362 .to = { FEAT_VMX_EXIT_CTLS, ~0ull }, 1363 }, 1364 { 1365 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1366 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull }, 1367 }, 1368 { 1369 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1370 .to = { FEAT_VMX_MISC, ~0ull }, 1371 }, 1372 { 1373 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1374 .to = { FEAT_VMX_BASIC, ~0ull }, 1375 }, 1376 { 1377 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, 1378 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE }, 1379 }, 1380 { 1381 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS }, 1382 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull }, 1383 }, 1384 { 1385 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES }, 1386 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES }, 1387 }, 1388 { 1389 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND }, 1390 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING }, 1391 }, 1392 { 1393 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, 1394 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, 1395 }, 1396 { 1397 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, 1398 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, 1399 }, 1400 { 1401 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT }, 1402 .to = { FEAT_14_0_ECX, ~0ull }, 1403 }, 1404 { 1405 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP }, 1406 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP }, 1407 }, 1408 { 1409 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1410 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull }, 1411 }, 1412 { 1413 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1414 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST }, 1415 }, 1416 { 1417 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID }, 1418 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 }, 1419 }, 1420 { 1421 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC }, 1422 .to = { FEAT_VMX_VMFUNC, ~0ull }, 1423 }, 1424 { 1425 .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM }, 1426 .to = { FEAT_SVM, ~0ull }, 1427 }, 1428 }; 1429 1430 typedef struct X86RegisterInfo32 { 1431 /* Name of register */ 1432 const char *name; 1433 /* QAPI enum value register */ 1434 X86CPURegister32 qapi_enum; 1435 } X86RegisterInfo32; 1436 1437 #define REGISTER(reg) \ 1438 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1439 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1440 REGISTER(EAX), 1441 REGISTER(ECX), 1442 REGISTER(EDX), 1443 REGISTER(EBX), 1444 REGISTER(ESP), 1445 REGISTER(EBP), 1446 REGISTER(ESI), 1447 REGISTER(EDI), 1448 }; 1449 #undef REGISTER 1450 1451 typedef struct ExtSaveArea { 1452 uint32_t feature, bits; 1453 uint32_t offset, size; 1454 } ExtSaveArea; 1455 1456 static const ExtSaveArea x86_ext_save_areas[] = { 1457 [XSTATE_FP_BIT] = { 1458 /* x87 FP state component is always enabled if XSAVE is supported */ 1459 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1460 /* x87 state is in the legacy region of the XSAVE area */ 1461 .offset = 0, 1462 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1463 }, 1464 [XSTATE_SSE_BIT] = { 1465 /* SSE state component is always enabled if XSAVE is supported */ 1466 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1467 /* SSE state is in the legacy region of the XSAVE area */ 1468 .offset = 0, 1469 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1470 }, 1471 [XSTATE_YMM_BIT] = 1472 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 1473 .offset = offsetof(X86XSaveArea, avx_state), 1474 .size = sizeof(XSaveAVX) }, 1475 [XSTATE_BNDREGS_BIT] = 1476 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1477 .offset = offsetof(X86XSaveArea, bndreg_state), 1478 .size = sizeof(XSaveBNDREG) }, 1479 [XSTATE_BNDCSR_BIT] = 1480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1481 .offset = offsetof(X86XSaveArea, bndcsr_state), 1482 .size = sizeof(XSaveBNDCSR) }, 1483 [XSTATE_OPMASK_BIT] = 1484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1485 .offset = offsetof(X86XSaveArea, opmask_state), 1486 .size = sizeof(XSaveOpmask) }, 1487 [XSTATE_ZMM_Hi256_BIT] = 1488 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1489 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 1490 .size = sizeof(XSaveZMM_Hi256) }, 1491 [XSTATE_Hi16_ZMM_BIT] = 1492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1493 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 1494 .size = sizeof(XSaveHi16_ZMM) }, 1495 [XSTATE_PKRU_BIT] = 1496 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 1497 .offset = offsetof(X86XSaveArea, pkru_state), 1498 .size = sizeof(XSavePKRU) }, 1499 }; 1500 1501 static uint32_t xsave_area_size(uint64_t mask) 1502 { 1503 int i; 1504 uint64_t ret = 0; 1505 1506 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 1507 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 1508 if ((mask >> i) & 1) { 1509 ret = MAX(ret, esa->offset + esa->size); 1510 } 1511 } 1512 return ret; 1513 } 1514 1515 static inline bool accel_uses_host_cpuid(void) 1516 { 1517 return kvm_enabled() || hvf_enabled(); 1518 } 1519 1520 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 1521 { 1522 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 1523 cpu->env.features[FEAT_XSAVE_COMP_LO]; 1524 } 1525 1526 /* Return name of 32-bit register, from a R_* constant */ 1527 static const char *get_register_name_32(unsigned int reg) 1528 { 1529 if (reg >= CPU_NB_REGS32) { 1530 return NULL; 1531 } 1532 return x86_reg_info_32[reg].name; 1533 } 1534 1535 /* 1536 * Returns the set of feature flags that are supported and migratable by 1537 * QEMU, for a given FeatureWord. 1538 */ 1539 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) 1540 { 1541 FeatureWordInfo *wi = &feature_word_info[w]; 1542 uint64_t r = 0; 1543 int i; 1544 1545 for (i = 0; i < 64; i++) { 1546 uint64_t f = 1ULL << i; 1547 1548 /* If the feature name is known, it is implicitly considered migratable, 1549 * unless it is explicitly set in unmigratable_flags */ 1550 if ((wi->migratable_flags & f) || 1551 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1552 r |= f; 1553 } 1554 } 1555 return r; 1556 } 1557 1558 void host_cpuid(uint32_t function, uint32_t count, 1559 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1560 { 1561 uint32_t vec[4]; 1562 1563 #ifdef __x86_64__ 1564 asm volatile("cpuid" 1565 : "=a"(vec[0]), "=b"(vec[1]), 1566 "=c"(vec[2]), "=d"(vec[3]) 1567 : "0"(function), "c"(count) : "cc"); 1568 #elif defined(__i386__) 1569 asm volatile("pusha \n\t" 1570 "cpuid \n\t" 1571 "mov %%eax, 0(%2) \n\t" 1572 "mov %%ebx, 4(%2) \n\t" 1573 "mov %%ecx, 8(%2) \n\t" 1574 "mov %%edx, 12(%2) \n\t" 1575 "popa" 1576 : : "a"(function), "c"(count), "S"(vec) 1577 : "memory", "cc"); 1578 #else 1579 abort(); 1580 #endif 1581 1582 if (eax) 1583 *eax = vec[0]; 1584 if (ebx) 1585 *ebx = vec[1]; 1586 if (ecx) 1587 *ecx = vec[2]; 1588 if (edx) 1589 *edx = vec[3]; 1590 } 1591 1592 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1593 { 1594 uint32_t eax, ebx, ecx, edx; 1595 1596 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1597 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1598 1599 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1600 if (family) { 1601 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1602 } 1603 if (model) { 1604 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1605 } 1606 if (stepping) { 1607 *stepping = eax & 0x0F; 1608 } 1609 } 1610 1611 /* CPU class name definitions: */ 1612 1613 /* Return type name for a given CPU model name 1614 * Caller is responsible for freeing the returned string. 1615 */ 1616 static char *x86_cpu_type_name(const char *model_name) 1617 { 1618 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1619 } 1620 1621 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1622 { 1623 g_autofree char *typename = x86_cpu_type_name(cpu_model); 1624 return object_class_by_name(typename); 1625 } 1626 1627 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1628 { 1629 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1630 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1631 return g_strndup(class_name, 1632 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1633 } 1634 1635 typedef struct PropValue { 1636 const char *prop, *value; 1637 } PropValue; 1638 1639 typedef struct X86CPUVersionDefinition { 1640 X86CPUVersion version; 1641 const char *alias; 1642 const char *note; 1643 PropValue *props; 1644 } X86CPUVersionDefinition; 1645 1646 /* Base definition for a CPU model */ 1647 typedef struct X86CPUDefinition { 1648 const char *name; 1649 uint32_t level; 1650 uint32_t xlevel; 1651 /* vendor is zero-terminated, 12 character ASCII string */ 1652 char vendor[CPUID_VENDOR_SZ + 1]; 1653 int family; 1654 int model; 1655 int stepping; 1656 FeatureWordArray features; 1657 const char *model_id; 1658 CPUCaches *cache_info; 1659 /* 1660 * Definitions for alternative versions of CPU model. 1661 * List is terminated by item with version == 0. 1662 * If NULL, version 1 will be registered automatically. 1663 */ 1664 const X86CPUVersionDefinition *versions; 1665 const char *deprecation_note; 1666 } X86CPUDefinition; 1667 1668 /* Reference to a specific CPU model version */ 1669 struct X86CPUModel { 1670 /* Base CPU definition */ 1671 X86CPUDefinition *cpudef; 1672 /* CPU model version */ 1673 X86CPUVersion version; 1674 const char *note; 1675 /* 1676 * If true, this is an alias CPU model. 1677 * This matters only for "-cpu help" and query-cpu-definitions 1678 */ 1679 bool is_alias; 1680 }; 1681 1682 /* Get full model name for CPU version */ 1683 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1684 X86CPUVersion version) 1685 { 1686 assert(version > 0); 1687 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1688 } 1689 1690 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1691 { 1692 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1693 static const X86CPUVersionDefinition default_version_list[] = { 1694 { 1 }, 1695 { /* end of list */ } 1696 }; 1697 1698 return def->versions ?: default_version_list; 1699 } 1700 1701 static CPUCaches epyc_cache_info = { 1702 .l1d_cache = &(CPUCacheInfo) { 1703 .type = DATA_CACHE, 1704 .level = 1, 1705 .size = 32 * KiB, 1706 .line_size = 64, 1707 .associativity = 8, 1708 .partitions = 1, 1709 .sets = 64, 1710 .lines_per_tag = 1, 1711 .self_init = 1, 1712 .no_invd_sharing = true, 1713 }, 1714 .l1i_cache = &(CPUCacheInfo) { 1715 .type = INSTRUCTION_CACHE, 1716 .level = 1, 1717 .size = 64 * KiB, 1718 .line_size = 64, 1719 .associativity = 4, 1720 .partitions = 1, 1721 .sets = 256, 1722 .lines_per_tag = 1, 1723 .self_init = 1, 1724 .no_invd_sharing = true, 1725 }, 1726 .l2_cache = &(CPUCacheInfo) { 1727 .type = UNIFIED_CACHE, 1728 .level = 2, 1729 .size = 512 * KiB, 1730 .line_size = 64, 1731 .associativity = 8, 1732 .partitions = 1, 1733 .sets = 1024, 1734 .lines_per_tag = 1, 1735 }, 1736 .l3_cache = &(CPUCacheInfo) { 1737 .type = UNIFIED_CACHE, 1738 .level = 3, 1739 .size = 8 * MiB, 1740 .line_size = 64, 1741 .associativity = 16, 1742 .partitions = 1, 1743 .sets = 8192, 1744 .lines_per_tag = 1, 1745 .self_init = true, 1746 .inclusive = true, 1747 .complex_indexing = true, 1748 }, 1749 }; 1750 1751 static CPUCaches epyc_rome_cache_info = { 1752 .l1d_cache = &(CPUCacheInfo) { 1753 .type = DATA_CACHE, 1754 .level = 1, 1755 .size = 32 * KiB, 1756 .line_size = 64, 1757 .associativity = 8, 1758 .partitions = 1, 1759 .sets = 64, 1760 .lines_per_tag = 1, 1761 .self_init = 1, 1762 .no_invd_sharing = true, 1763 }, 1764 .l1i_cache = &(CPUCacheInfo) { 1765 .type = INSTRUCTION_CACHE, 1766 .level = 1, 1767 .size = 32 * KiB, 1768 .line_size = 64, 1769 .associativity = 8, 1770 .partitions = 1, 1771 .sets = 64, 1772 .lines_per_tag = 1, 1773 .self_init = 1, 1774 .no_invd_sharing = true, 1775 }, 1776 .l2_cache = &(CPUCacheInfo) { 1777 .type = UNIFIED_CACHE, 1778 .level = 2, 1779 .size = 512 * KiB, 1780 .line_size = 64, 1781 .associativity = 8, 1782 .partitions = 1, 1783 .sets = 1024, 1784 .lines_per_tag = 1, 1785 }, 1786 .l3_cache = &(CPUCacheInfo) { 1787 .type = UNIFIED_CACHE, 1788 .level = 3, 1789 .size = 16 * MiB, 1790 .line_size = 64, 1791 .associativity = 16, 1792 .partitions = 1, 1793 .sets = 16384, 1794 .lines_per_tag = 1, 1795 .self_init = true, 1796 .inclusive = true, 1797 .complex_indexing = true, 1798 }, 1799 }; 1800 1801 /* The following VMX features are not supported by KVM and are left out in the 1802 * CPU definitions: 1803 * 1804 * Dual-monitor support (all processors) 1805 * Entry to SMM 1806 * Deactivate dual-monitor treatment 1807 * Number of CR3-target values 1808 * Shutdown activity state 1809 * Wait-for-SIPI activity state 1810 * PAUSE-loop exiting (Westmere and newer) 1811 * EPT-violation #VE (Broadwell and newer) 1812 * Inject event with insn length=0 (Skylake and newer) 1813 * Conceal non-root operation from PT 1814 * Conceal VM exits from PT 1815 * Conceal VM entries from PT 1816 * Enable ENCLS exiting 1817 * Mode-based execute control (XS/XU) 1818 s TSC scaling (Skylake Server and newer) 1819 * GPA translation for PT (IceLake and newer) 1820 * User wait and pause 1821 * ENCLV exiting 1822 * Load IA32_RTIT_CTL 1823 * Clear IA32_RTIT_CTL 1824 * Advanced VM-exit information for EPT violations 1825 * Sub-page write permissions 1826 * PT in VMX operation 1827 */ 1828 1829 static X86CPUDefinition builtin_x86_defs[] = { 1830 { 1831 .name = "qemu64", 1832 .level = 0xd, 1833 .vendor = CPUID_VENDOR_AMD, 1834 .family = 6, 1835 .model = 6, 1836 .stepping = 3, 1837 .features[FEAT_1_EDX] = 1838 PPRO_FEATURES | 1839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1840 CPUID_PSE36, 1841 .features[FEAT_1_ECX] = 1842 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1843 .features[FEAT_8000_0001_EDX] = 1844 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1845 .features[FEAT_8000_0001_ECX] = 1846 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1847 .xlevel = 0x8000000A, 1848 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1849 }, 1850 { 1851 .name = "phenom", 1852 .level = 5, 1853 .vendor = CPUID_VENDOR_AMD, 1854 .family = 16, 1855 .model = 2, 1856 .stepping = 3, 1857 /* Missing: CPUID_HT */ 1858 .features[FEAT_1_EDX] = 1859 PPRO_FEATURES | 1860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1861 CPUID_PSE36 | CPUID_VME, 1862 .features[FEAT_1_ECX] = 1863 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1864 CPUID_EXT_POPCNT, 1865 .features[FEAT_8000_0001_EDX] = 1866 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1867 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1868 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1869 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1870 CPUID_EXT3_CR8LEG, 1871 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1872 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1873 .features[FEAT_8000_0001_ECX] = 1874 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1875 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1876 /* Missing: CPUID_SVM_LBRV */ 1877 .features[FEAT_SVM] = 1878 CPUID_SVM_NPT, 1879 .xlevel = 0x8000001A, 1880 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1881 }, 1882 { 1883 .name = "core2duo", 1884 .level = 10, 1885 .vendor = CPUID_VENDOR_INTEL, 1886 .family = 6, 1887 .model = 15, 1888 .stepping = 11, 1889 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1890 .features[FEAT_1_EDX] = 1891 PPRO_FEATURES | 1892 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1893 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1894 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1895 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1896 .features[FEAT_1_ECX] = 1897 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1898 CPUID_EXT_CX16, 1899 .features[FEAT_8000_0001_EDX] = 1900 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1901 .features[FEAT_8000_0001_ECX] = 1902 CPUID_EXT3_LAHF_LM, 1903 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1904 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1905 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1906 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1907 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1908 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1909 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1910 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1911 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1912 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1913 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1914 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1915 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1916 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1917 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1918 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1919 .features[FEAT_VMX_SECONDARY_CTLS] = 1920 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1921 .xlevel = 0x80000008, 1922 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1923 }, 1924 { 1925 .name = "kvm64", 1926 .level = 0xd, 1927 .vendor = CPUID_VENDOR_INTEL, 1928 .family = 15, 1929 .model = 6, 1930 .stepping = 1, 1931 /* Missing: CPUID_HT */ 1932 .features[FEAT_1_EDX] = 1933 PPRO_FEATURES | CPUID_VME | 1934 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1935 CPUID_PSE36, 1936 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1937 .features[FEAT_1_ECX] = 1938 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1939 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1940 .features[FEAT_8000_0001_EDX] = 1941 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1942 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1943 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1944 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1945 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1946 .features[FEAT_8000_0001_ECX] = 1947 0, 1948 /* VMX features from Cedar Mill/Prescott */ 1949 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1950 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1951 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1952 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1953 VMX_PIN_BASED_NMI_EXITING, 1954 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1955 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1956 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1957 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1958 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1959 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1960 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1961 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1962 .xlevel = 0x80000008, 1963 .model_id = "Common KVM processor" 1964 }, 1965 { 1966 .name = "qemu32", 1967 .level = 4, 1968 .vendor = CPUID_VENDOR_INTEL, 1969 .family = 6, 1970 .model = 6, 1971 .stepping = 3, 1972 .features[FEAT_1_EDX] = 1973 PPRO_FEATURES, 1974 .features[FEAT_1_ECX] = 1975 CPUID_EXT_SSE3, 1976 .xlevel = 0x80000004, 1977 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1978 }, 1979 { 1980 .name = "kvm32", 1981 .level = 5, 1982 .vendor = CPUID_VENDOR_INTEL, 1983 .family = 15, 1984 .model = 6, 1985 .stepping = 1, 1986 .features[FEAT_1_EDX] = 1987 PPRO_FEATURES | CPUID_VME | 1988 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1989 .features[FEAT_1_ECX] = 1990 CPUID_EXT_SSE3, 1991 .features[FEAT_8000_0001_ECX] = 1992 0, 1993 /* VMX features from Yonah */ 1994 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1995 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1996 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1997 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1998 VMX_PIN_BASED_NMI_EXITING, 1999 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2000 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2001 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2002 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2003 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2004 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2005 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2006 .xlevel = 0x80000008, 2007 .model_id = "Common 32-bit KVM processor" 2008 }, 2009 { 2010 .name = "coreduo", 2011 .level = 10, 2012 .vendor = CPUID_VENDOR_INTEL, 2013 .family = 6, 2014 .model = 14, 2015 .stepping = 8, 2016 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2017 .features[FEAT_1_EDX] = 2018 PPRO_FEATURES | CPUID_VME | 2019 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2020 CPUID_SS, 2021 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2022 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2023 .features[FEAT_1_ECX] = 2024 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2025 .features[FEAT_8000_0001_EDX] = 2026 CPUID_EXT2_NX, 2027 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2028 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2029 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2030 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2031 VMX_PIN_BASED_NMI_EXITING, 2032 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2033 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2034 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2035 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2036 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2037 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2038 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2039 .xlevel = 0x80000008, 2040 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2041 }, 2042 { 2043 .name = "486", 2044 .level = 1, 2045 .vendor = CPUID_VENDOR_INTEL, 2046 .family = 4, 2047 .model = 8, 2048 .stepping = 0, 2049 .features[FEAT_1_EDX] = 2050 I486_FEATURES, 2051 .xlevel = 0, 2052 .model_id = "", 2053 }, 2054 { 2055 .name = "pentium", 2056 .level = 1, 2057 .vendor = CPUID_VENDOR_INTEL, 2058 .family = 5, 2059 .model = 4, 2060 .stepping = 3, 2061 .features[FEAT_1_EDX] = 2062 PENTIUM_FEATURES, 2063 .xlevel = 0, 2064 .model_id = "", 2065 }, 2066 { 2067 .name = "pentium2", 2068 .level = 2, 2069 .vendor = CPUID_VENDOR_INTEL, 2070 .family = 6, 2071 .model = 5, 2072 .stepping = 2, 2073 .features[FEAT_1_EDX] = 2074 PENTIUM2_FEATURES, 2075 .xlevel = 0, 2076 .model_id = "", 2077 }, 2078 { 2079 .name = "pentium3", 2080 .level = 3, 2081 .vendor = CPUID_VENDOR_INTEL, 2082 .family = 6, 2083 .model = 7, 2084 .stepping = 3, 2085 .features[FEAT_1_EDX] = 2086 PENTIUM3_FEATURES, 2087 .xlevel = 0, 2088 .model_id = "", 2089 }, 2090 { 2091 .name = "athlon", 2092 .level = 2, 2093 .vendor = CPUID_VENDOR_AMD, 2094 .family = 6, 2095 .model = 2, 2096 .stepping = 3, 2097 .features[FEAT_1_EDX] = 2098 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2099 CPUID_MCA, 2100 .features[FEAT_8000_0001_EDX] = 2101 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2102 .xlevel = 0x80000008, 2103 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2104 }, 2105 { 2106 .name = "n270", 2107 .level = 10, 2108 .vendor = CPUID_VENDOR_INTEL, 2109 .family = 6, 2110 .model = 28, 2111 .stepping = 2, 2112 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2113 .features[FEAT_1_EDX] = 2114 PPRO_FEATURES | 2115 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2116 CPUID_ACPI | CPUID_SS, 2117 /* Some CPUs got no CPUID_SEP */ 2118 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2119 * CPUID_EXT_XTPR */ 2120 .features[FEAT_1_ECX] = 2121 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2122 CPUID_EXT_MOVBE, 2123 .features[FEAT_8000_0001_EDX] = 2124 CPUID_EXT2_NX, 2125 .features[FEAT_8000_0001_ECX] = 2126 CPUID_EXT3_LAHF_LM, 2127 .xlevel = 0x80000008, 2128 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2129 }, 2130 { 2131 .name = "Conroe", 2132 .level = 10, 2133 .vendor = CPUID_VENDOR_INTEL, 2134 .family = 6, 2135 .model = 15, 2136 .stepping = 3, 2137 .features[FEAT_1_EDX] = 2138 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2142 CPUID_DE | CPUID_FP87, 2143 .features[FEAT_1_ECX] = 2144 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2145 .features[FEAT_8000_0001_EDX] = 2146 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2147 .features[FEAT_8000_0001_ECX] = 2148 CPUID_EXT3_LAHF_LM, 2149 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2150 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2151 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2152 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2153 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2154 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2155 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2156 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2157 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2158 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2159 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2160 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2161 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2162 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2163 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2164 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2165 .features[FEAT_VMX_SECONDARY_CTLS] = 2166 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2167 .xlevel = 0x80000008, 2168 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2169 }, 2170 { 2171 .name = "Penryn", 2172 .level = 10, 2173 .vendor = CPUID_VENDOR_INTEL, 2174 .family = 6, 2175 .model = 23, 2176 .stepping = 3, 2177 .features[FEAT_1_EDX] = 2178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2182 CPUID_DE | CPUID_FP87, 2183 .features[FEAT_1_ECX] = 2184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2185 CPUID_EXT_SSE3, 2186 .features[FEAT_8000_0001_EDX] = 2187 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2188 .features[FEAT_8000_0001_ECX] = 2189 CPUID_EXT3_LAHF_LM, 2190 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2191 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2192 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2193 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2194 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2195 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2196 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2197 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2198 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2199 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2200 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2201 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2202 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2203 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2204 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2205 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2206 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2207 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2208 .features[FEAT_VMX_SECONDARY_CTLS] = 2209 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2210 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2211 .xlevel = 0x80000008, 2212 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2213 }, 2214 { 2215 .name = "Nehalem", 2216 .level = 11, 2217 .vendor = CPUID_VENDOR_INTEL, 2218 .family = 6, 2219 .model = 26, 2220 .stepping = 3, 2221 .features[FEAT_1_EDX] = 2222 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2223 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2224 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2225 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2226 CPUID_DE | CPUID_FP87, 2227 .features[FEAT_1_ECX] = 2228 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2229 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2230 .features[FEAT_8000_0001_EDX] = 2231 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2232 .features[FEAT_8000_0001_ECX] = 2233 CPUID_EXT3_LAHF_LM, 2234 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2235 MSR_VMX_BASIC_TRUE_CTLS, 2236 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2237 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2238 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2239 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2240 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2241 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2242 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2243 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2244 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2245 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2246 .features[FEAT_VMX_EXIT_CTLS] = 2247 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2248 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2249 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2250 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2251 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2252 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2253 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2254 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2255 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2256 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2257 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2258 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2259 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2260 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2261 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2262 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2263 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2264 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2265 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2266 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2267 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2268 .features[FEAT_VMX_SECONDARY_CTLS] = 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2270 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2271 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2272 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2273 VMX_SECONDARY_EXEC_ENABLE_VPID, 2274 .xlevel = 0x80000008, 2275 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2276 .versions = (X86CPUVersionDefinition[]) { 2277 { .version = 1 }, 2278 { 2279 .version = 2, 2280 .alias = "Nehalem-IBRS", 2281 .props = (PropValue[]) { 2282 { "spec-ctrl", "on" }, 2283 { "model-id", 2284 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { /* end of list */ } 2289 } 2290 }, 2291 { 2292 .name = "Westmere", 2293 .level = 11, 2294 .vendor = CPUID_VENDOR_INTEL, 2295 .family = 6, 2296 .model = 44, 2297 .stepping = 1, 2298 .features[FEAT_1_EDX] = 2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2303 CPUID_DE | CPUID_FP87, 2304 .features[FEAT_1_ECX] = 2305 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2308 .features[FEAT_8000_0001_EDX] = 2309 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2310 .features[FEAT_8000_0001_ECX] = 2311 CPUID_EXT3_LAHF_LM, 2312 .features[FEAT_6_EAX] = 2313 CPUID_6_EAX_ARAT, 2314 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2315 MSR_VMX_BASIC_TRUE_CTLS, 2316 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2317 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2318 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2319 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2320 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2321 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2322 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2323 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2324 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2325 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2326 .features[FEAT_VMX_EXIT_CTLS] = 2327 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2328 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2329 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2330 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2331 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2332 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2333 MSR_VMX_MISC_STORE_LMA, 2334 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2335 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2336 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2337 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2338 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2339 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2340 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2341 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2342 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2343 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2344 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2345 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2346 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2347 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2348 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2349 .features[FEAT_VMX_SECONDARY_CTLS] = 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2351 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2352 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2353 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2354 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2355 .xlevel = 0x80000008, 2356 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2357 .versions = (X86CPUVersionDefinition[]) { 2358 { .version = 1 }, 2359 { 2360 .version = 2, 2361 .alias = "Westmere-IBRS", 2362 .props = (PropValue[]) { 2363 { "spec-ctrl", "on" }, 2364 { "model-id", 2365 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { /* end of list */ } 2370 } 2371 }, 2372 { 2373 .name = "SandyBridge", 2374 .level = 0xd, 2375 .vendor = CPUID_VENDOR_INTEL, 2376 .family = 6, 2377 .model = 42, 2378 .stepping = 1, 2379 .features[FEAT_1_EDX] = 2380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2384 CPUID_DE | CPUID_FP87, 2385 .features[FEAT_1_ECX] = 2386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2387 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2388 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2389 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2390 CPUID_EXT_SSE3, 2391 .features[FEAT_8000_0001_EDX] = 2392 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2393 CPUID_EXT2_SYSCALL, 2394 .features[FEAT_8000_0001_ECX] = 2395 CPUID_EXT3_LAHF_LM, 2396 .features[FEAT_XSAVE] = 2397 CPUID_XSAVE_XSAVEOPT, 2398 .features[FEAT_6_EAX] = 2399 CPUID_6_EAX_ARAT, 2400 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2401 MSR_VMX_BASIC_TRUE_CTLS, 2402 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2403 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2404 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2405 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2406 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2407 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2408 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2409 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2410 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2411 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2412 .features[FEAT_VMX_EXIT_CTLS] = 2413 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2414 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2415 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2416 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2417 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2418 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2419 MSR_VMX_MISC_STORE_LMA, 2420 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2421 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2422 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2423 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2424 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2425 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2426 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2427 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2428 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2429 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2430 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2431 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2432 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2433 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2434 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2435 .features[FEAT_VMX_SECONDARY_CTLS] = 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2437 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2438 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2439 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2440 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2441 .xlevel = 0x80000008, 2442 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2443 .versions = (X86CPUVersionDefinition[]) { 2444 { .version = 1 }, 2445 { 2446 .version = 2, 2447 .alias = "SandyBridge-IBRS", 2448 .props = (PropValue[]) { 2449 { "spec-ctrl", "on" }, 2450 { "model-id", 2451 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { /* end of list */ } 2456 } 2457 }, 2458 { 2459 .name = "IvyBridge", 2460 .level = 0xd, 2461 .vendor = CPUID_VENDOR_INTEL, 2462 .family = 6, 2463 .model = 58, 2464 .stepping = 9, 2465 .features[FEAT_1_EDX] = 2466 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2467 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2468 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2469 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2470 CPUID_DE | CPUID_FP87, 2471 .features[FEAT_1_ECX] = 2472 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2473 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2474 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2475 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2476 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2477 .features[FEAT_7_0_EBX] = 2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2479 CPUID_7_0_EBX_ERMS, 2480 .features[FEAT_8000_0001_EDX] = 2481 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2482 CPUID_EXT2_SYSCALL, 2483 .features[FEAT_8000_0001_ECX] = 2484 CPUID_EXT3_LAHF_LM, 2485 .features[FEAT_XSAVE] = 2486 CPUID_XSAVE_XSAVEOPT, 2487 .features[FEAT_6_EAX] = 2488 CPUID_6_EAX_ARAT, 2489 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2490 MSR_VMX_BASIC_TRUE_CTLS, 2491 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2492 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2493 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2494 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2495 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2496 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2497 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2498 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2499 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2500 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2501 .features[FEAT_VMX_EXIT_CTLS] = 2502 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2503 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2504 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2505 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2506 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2507 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2508 MSR_VMX_MISC_STORE_LMA, 2509 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2510 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2511 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2512 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2513 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2514 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2515 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2516 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2517 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2518 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2519 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2520 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2521 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2522 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2523 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2524 .features[FEAT_VMX_SECONDARY_CTLS] = 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2526 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2527 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2528 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2529 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2530 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2531 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2532 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2533 .xlevel = 0x80000008, 2534 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2535 .versions = (X86CPUVersionDefinition[]) { 2536 { .version = 1 }, 2537 { 2538 .version = 2, 2539 .alias = "IvyBridge-IBRS", 2540 .props = (PropValue[]) { 2541 { "spec-ctrl", "on" }, 2542 { "model-id", 2543 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { /* end of list */ } 2548 } 2549 }, 2550 { 2551 .name = "Haswell", 2552 .level = 0xd, 2553 .vendor = CPUID_VENDOR_INTEL, 2554 .family = 6, 2555 .model = 60, 2556 .stepping = 4, 2557 .features[FEAT_1_EDX] = 2558 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2559 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2560 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2561 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2562 CPUID_DE | CPUID_FP87, 2563 .features[FEAT_1_ECX] = 2564 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2565 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2566 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2567 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2568 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2569 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2570 .features[FEAT_8000_0001_EDX] = 2571 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2572 CPUID_EXT2_SYSCALL, 2573 .features[FEAT_8000_0001_ECX] = 2574 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2575 .features[FEAT_7_0_EBX] = 2576 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2577 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2578 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2579 CPUID_7_0_EBX_RTM, 2580 .features[FEAT_XSAVE] = 2581 CPUID_XSAVE_XSAVEOPT, 2582 .features[FEAT_6_EAX] = 2583 CPUID_6_EAX_ARAT, 2584 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2585 MSR_VMX_BASIC_TRUE_CTLS, 2586 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2587 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2588 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2589 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2590 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2591 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2592 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2593 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2594 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2595 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2596 .features[FEAT_VMX_EXIT_CTLS] = 2597 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2598 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2599 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2600 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2601 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2602 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2603 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2604 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2605 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2606 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2607 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2608 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2609 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2610 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2611 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2612 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2613 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2614 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2615 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2616 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2617 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2618 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2619 .features[FEAT_VMX_SECONDARY_CTLS] = 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2621 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2622 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2623 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2624 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2625 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2626 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2627 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2628 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2629 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2630 .xlevel = 0x80000008, 2631 .model_id = "Intel Core Processor (Haswell)", 2632 .versions = (X86CPUVersionDefinition[]) { 2633 { .version = 1 }, 2634 { 2635 .version = 2, 2636 .alias = "Haswell-noTSX", 2637 .props = (PropValue[]) { 2638 { "hle", "off" }, 2639 { "rtm", "off" }, 2640 { "stepping", "1" }, 2641 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2642 { /* end of list */ } 2643 }, 2644 }, 2645 { 2646 .version = 3, 2647 .alias = "Haswell-IBRS", 2648 .props = (PropValue[]) { 2649 /* Restore TSX features removed by -v2 above */ 2650 { "hle", "on" }, 2651 { "rtm", "on" }, 2652 /* 2653 * Haswell and Haswell-IBRS had stepping=4 in 2654 * QEMU 4.0 and older 2655 */ 2656 { "stepping", "4" }, 2657 { "spec-ctrl", "on" }, 2658 { "model-id", 2659 "Intel Core Processor (Haswell, IBRS)" }, 2660 { /* end of list */ } 2661 } 2662 }, 2663 { 2664 .version = 4, 2665 .alias = "Haswell-noTSX-IBRS", 2666 .props = (PropValue[]) { 2667 { "hle", "off" }, 2668 { "rtm", "off" }, 2669 /* spec-ctrl was already enabled by -v3 above */ 2670 { "stepping", "1" }, 2671 { "model-id", 2672 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { /* end of list */ } 2677 } 2678 }, 2679 { 2680 .name = "Broadwell", 2681 .level = 0xd, 2682 .vendor = CPUID_VENDOR_INTEL, 2683 .family = 6, 2684 .model = 61, 2685 .stepping = 2, 2686 .features[FEAT_1_EDX] = 2687 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2688 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2689 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2690 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2691 CPUID_DE | CPUID_FP87, 2692 .features[FEAT_1_ECX] = 2693 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2694 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2695 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2696 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2697 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2698 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2699 .features[FEAT_8000_0001_EDX] = 2700 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2701 CPUID_EXT2_SYSCALL, 2702 .features[FEAT_8000_0001_ECX] = 2703 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2704 .features[FEAT_7_0_EBX] = 2705 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2706 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2707 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2708 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2709 CPUID_7_0_EBX_SMAP, 2710 .features[FEAT_XSAVE] = 2711 CPUID_XSAVE_XSAVEOPT, 2712 .features[FEAT_6_EAX] = 2713 CPUID_6_EAX_ARAT, 2714 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2715 MSR_VMX_BASIC_TRUE_CTLS, 2716 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2717 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2718 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2719 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2720 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2721 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2722 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2723 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2724 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2725 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2726 .features[FEAT_VMX_EXIT_CTLS] = 2727 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2728 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2729 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2730 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2731 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2732 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2733 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2734 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2735 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2736 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2737 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2738 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2739 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2740 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2741 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2742 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2743 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2744 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2745 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2746 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2747 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2748 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2749 .features[FEAT_VMX_SECONDARY_CTLS] = 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2751 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2752 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2753 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2754 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2755 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2756 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2757 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2758 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2759 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2760 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2761 .xlevel = 0x80000008, 2762 .model_id = "Intel Core Processor (Broadwell)", 2763 .versions = (X86CPUVersionDefinition[]) { 2764 { .version = 1 }, 2765 { 2766 .version = 2, 2767 .alias = "Broadwell-noTSX", 2768 .props = (PropValue[]) { 2769 { "hle", "off" }, 2770 { "rtm", "off" }, 2771 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2772 { /* end of list */ } 2773 }, 2774 }, 2775 { 2776 .version = 3, 2777 .alias = "Broadwell-IBRS", 2778 .props = (PropValue[]) { 2779 /* Restore TSX features removed by -v2 above */ 2780 { "hle", "on" }, 2781 { "rtm", "on" }, 2782 { "spec-ctrl", "on" }, 2783 { "model-id", 2784 "Intel Core Processor (Broadwell, IBRS)" }, 2785 { /* end of list */ } 2786 } 2787 }, 2788 { 2789 .version = 4, 2790 .alias = "Broadwell-noTSX-IBRS", 2791 .props = (PropValue[]) { 2792 { "hle", "off" }, 2793 { "rtm", "off" }, 2794 /* spec-ctrl was already enabled by -v3 above */ 2795 { "model-id", 2796 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { /* end of list */ } 2801 } 2802 }, 2803 { 2804 .name = "Skylake-Client", 2805 .level = 0xd, 2806 .vendor = CPUID_VENDOR_INTEL, 2807 .family = 6, 2808 .model = 94, 2809 .stepping = 3, 2810 .features[FEAT_1_EDX] = 2811 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2812 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2813 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2814 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2815 CPUID_DE | CPUID_FP87, 2816 .features[FEAT_1_ECX] = 2817 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2818 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2819 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2820 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2821 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2822 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2823 .features[FEAT_8000_0001_EDX] = 2824 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2825 CPUID_EXT2_SYSCALL, 2826 .features[FEAT_8000_0001_ECX] = 2827 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2828 .features[FEAT_7_0_EBX] = 2829 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2830 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2831 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2832 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2833 CPUID_7_0_EBX_SMAP, 2834 /* Missing: XSAVES (not supported by some Linux versions, 2835 * including v4.1 to v4.12). 2836 * KVM doesn't yet expose any XSAVES state save component, 2837 * and the only one defined in Skylake (processor tracing) 2838 * probably will block migration anyway. 2839 */ 2840 .features[FEAT_XSAVE] = 2841 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2842 CPUID_XSAVE_XGETBV1, 2843 .features[FEAT_6_EAX] = 2844 CPUID_6_EAX_ARAT, 2845 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2846 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2847 MSR_VMX_BASIC_TRUE_CTLS, 2848 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2849 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2850 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2851 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2852 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2853 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2854 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2855 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2856 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2857 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2858 .features[FEAT_VMX_EXIT_CTLS] = 2859 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2860 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2861 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2862 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2863 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2864 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2865 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2866 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2867 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2868 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2869 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2870 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2871 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2872 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2873 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2874 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2875 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2876 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2877 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2878 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2879 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2880 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2881 .features[FEAT_VMX_SECONDARY_CTLS] = 2882 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2883 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2884 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2885 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2886 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2887 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2888 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2889 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2890 .xlevel = 0x80000008, 2891 .model_id = "Intel Core Processor (Skylake)", 2892 .versions = (X86CPUVersionDefinition[]) { 2893 { .version = 1 }, 2894 { 2895 .version = 2, 2896 .alias = "Skylake-Client-IBRS", 2897 .props = (PropValue[]) { 2898 { "spec-ctrl", "on" }, 2899 { "model-id", 2900 "Intel Core Processor (Skylake, IBRS)" }, 2901 { /* end of list */ } 2902 } 2903 }, 2904 { 2905 .version = 3, 2906 .alias = "Skylake-Client-noTSX-IBRS", 2907 .props = (PropValue[]) { 2908 { "hle", "off" }, 2909 { "rtm", "off" }, 2910 { "model-id", 2911 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { /* end of list */ } 2916 } 2917 }, 2918 { 2919 .name = "Skylake-Server", 2920 .level = 0xd, 2921 .vendor = CPUID_VENDOR_INTEL, 2922 .family = 6, 2923 .model = 85, 2924 .stepping = 4, 2925 .features[FEAT_1_EDX] = 2926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2930 CPUID_DE | CPUID_FP87, 2931 .features[FEAT_1_ECX] = 2932 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2933 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2934 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2935 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2936 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2937 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2938 .features[FEAT_8000_0001_EDX] = 2939 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2940 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2941 .features[FEAT_8000_0001_ECX] = 2942 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2943 .features[FEAT_7_0_EBX] = 2944 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2945 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2946 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2947 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2948 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2949 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2950 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2951 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2952 .features[FEAT_7_0_ECX] = 2953 CPUID_7_0_ECX_PKU, 2954 /* Missing: XSAVES (not supported by some Linux versions, 2955 * including v4.1 to v4.12). 2956 * KVM doesn't yet expose any XSAVES state save component, 2957 * and the only one defined in Skylake (processor tracing) 2958 * probably will block migration anyway. 2959 */ 2960 .features[FEAT_XSAVE] = 2961 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2962 CPUID_XSAVE_XGETBV1, 2963 .features[FEAT_6_EAX] = 2964 CPUID_6_EAX_ARAT, 2965 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2966 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2967 MSR_VMX_BASIC_TRUE_CTLS, 2968 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2969 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2970 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2971 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2972 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2973 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2974 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2975 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2976 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2977 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2978 .features[FEAT_VMX_EXIT_CTLS] = 2979 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2980 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2981 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2982 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2983 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2984 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2985 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2986 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2987 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2988 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2989 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2990 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2991 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2992 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2993 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2994 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2995 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2996 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2997 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2998 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2999 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3000 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3001 .features[FEAT_VMX_SECONDARY_CTLS] = 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3003 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3004 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3005 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3006 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3007 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3008 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3009 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3010 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3011 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3012 .xlevel = 0x80000008, 3013 .model_id = "Intel Xeon Processor (Skylake)", 3014 .versions = (X86CPUVersionDefinition[]) { 3015 { .version = 1 }, 3016 { 3017 .version = 2, 3018 .alias = "Skylake-Server-IBRS", 3019 .props = (PropValue[]) { 3020 /* clflushopt was not added to Skylake-Server-IBRS */ 3021 /* TODO: add -v3 including clflushopt */ 3022 { "clflushopt", "off" }, 3023 { "spec-ctrl", "on" }, 3024 { "model-id", 3025 "Intel Xeon Processor (Skylake, IBRS)" }, 3026 { /* end of list */ } 3027 } 3028 }, 3029 { 3030 .version = 3, 3031 .alias = "Skylake-Server-noTSX-IBRS", 3032 .props = (PropValue[]) { 3033 { "hle", "off" }, 3034 { "rtm", "off" }, 3035 { "model-id", 3036 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .version = 4, 3042 .props = (PropValue[]) { 3043 { "vmx-eptp-switching", "on" }, 3044 { /* end of list */ } 3045 } 3046 }, 3047 { /* end of list */ } 3048 } 3049 }, 3050 { 3051 .name = "Cascadelake-Server", 3052 .level = 0xd, 3053 .vendor = CPUID_VENDOR_INTEL, 3054 .family = 6, 3055 .model = 85, 3056 .stepping = 6, 3057 .features[FEAT_1_EDX] = 3058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3062 CPUID_DE | CPUID_FP87, 3063 .features[FEAT_1_ECX] = 3064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3065 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3066 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3067 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3068 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3069 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3070 .features[FEAT_8000_0001_EDX] = 3071 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3072 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3073 .features[FEAT_8000_0001_ECX] = 3074 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3075 .features[FEAT_7_0_EBX] = 3076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3077 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3078 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3079 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3080 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3081 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3082 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3083 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3084 .features[FEAT_7_0_ECX] = 3085 CPUID_7_0_ECX_PKU | 3086 CPUID_7_0_ECX_AVX512VNNI, 3087 .features[FEAT_7_0_EDX] = 3088 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3089 /* Missing: XSAVES (not supported by some Linux versions, 3090 * including v4.1 to v4.12). 3091 * KVM doesn't yet expose any XSAVES state save component, 3092 * and the only one defined in Skylake (processor tracing) 3093 * probably will block migration anyway. 3094 */ 3095 .features[FEAT_XSAVE] = 3096 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3097 CPUID_XSAVE_XGETBV1, 3098 .features[FEAT_6_EAX] = 3099 CPUID_6_EAX_ARAT, 3100 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3101 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3102 MSR_VMX_BASIC_TRUE_CTLS, 3103 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3104 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3105 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3106 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3107 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3108 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3109 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3110 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3111 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3112 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3113 .features[FEAT_VMX_EXIT_CTLS] = 3114 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3115 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3116 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3117 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3118 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3119 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3120 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3121 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3122 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3123 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3124 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3125 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3126 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3127 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3128 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3129 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3130 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3131 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3132 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3133 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3134 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3135 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3136 .features[FEAT_VMX_SECONDARY_CTLS] = 3137 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3138 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3139 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3140 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3141 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3142 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3143 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3144 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3145 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3146 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3147 .xlevel = 0x80000008, 3148 .model_id = "Intel Xeon Processor (Cascadelake)", 3149 .versions = (X86CPUVersionDefinition[]) { 3150 { .version = 1 }, 3151 { .version = 2, 3152 .note = "ARCH_CAPABILITIES", 3153 .props = (PropValue[]) { 3154 { "arch-capabilities", "on" }, 3155 { "rdctl-no", "on" }, 3156 { "ibrs-all", "on" }, 3157 { "skip-l1dfl-vmentry", "on" }, 3158 { "mds-no", "on" }, 3159 { /* end of list */ } 3160 }, 3161 }, 3162 { .version = 3, 3163 .alias = "Cascadelake-Server-noTSX", 3164 .note = "ARCH_CAPABILITIES, no TSX", 3165 .props = (PropValue[]) { 3166 { "hle", "off" }, 3167 { "rtm", "off" }, 3168 { /* end of list */ } 3169 }, 3170 }, 3171 { .version = 4, 3172 .note = "ARCH_CAPABILITIES, no TSX", 3173 .props = (PropValue[]) { 3174 { "vmx-eptp-switching", "on" }, 3175 { /* end of list */ } 3176 }, 3177 }, 3178 { /* end of list */ } 3179 } 3180 }, 3181 { 3182 .name = "Cooperlake", 3183 .level = 0xd, 3184 .vendor = CPUID_VENDOR_INTEL, 3185 .family = 6, 3186 .model = 85, 3187 .stepping = 10, 3188 .features[FEAT_1_EDX] = 3189 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3190 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3191 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3192 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3193 CPUID_DE | CPUID_FP87, 3194 .features[FEAT_1_ECX] = 3195 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3196 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3197 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3198 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3199 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3200 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3201 .features[FEAT_8000_0001_EDX] = 3202 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3203 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3204 .features[FEAT_8000_0001_ECX] = 3205 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3206 .features[FEAT_7_0_EBX] = 3207 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3208 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3209 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3210 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3211 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3212 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3213 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3214 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3215 .features[FEAT_7_0_ECX] = 3216 CPUID_7_0_ECX_PKU | 3217 CPUID_7_0_ECX_AVX512VNNI, 3218 .features[FEAT_7_0_EDX] = 3219 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3220 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3221 .features[FEAT_ARCH_CAPABILITIES] = 3222 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3223 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3224 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3225 .features[FEAT_7_1_EAX] = 3226 CPUID_7_1_EAX_AVX512_BF16, 3227 /* 3228 * Missing: XSAVES (not supported by some Linux versions, 3229 * including v4.1 to v4.12). 3230 * KVM doesn't yet expose any XSAVES state save component, 3231 * and the only one defined in Skylake (processor tracing) 3232 * probably will block migration anyway. 3233 */ 3234 .features[FEAT_XSAVE] = 3235 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3236 CPUID_XSAVE_XGETBV1, 3237 .features[FEAT_6_EAX] = 3238 CPUID_6_EAX_ARAT, 3239 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3240 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3241 MSR_VMX_BASIC_TRUE_CTLS, 3242 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3243 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3244 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3245 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3246 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3247 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3248 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3249 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3250 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3251 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3252 .features[FEAT_VMX_EXIT_CTLS] = 3253 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3254 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3255 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3256 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3257 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3258 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3259 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3260 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3261 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3262 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3263 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3264 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3265 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3266 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3267 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3268 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3269 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3270 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3271 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3272 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3273 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3274 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3275 .features[FEAT_VMX_SECONDARY_CTLS] = 3276 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3277 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3278 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3279 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3280 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3281 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3282 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3283 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3284 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3285 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3286 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3287 .xlevel = 0x80000008, 3288 .model_id = "Intel Xeon Processor (Cooperlake)", 3289 }, 3290 { 3291 .name = "Icelake-Client", 3292 .level = 0xd, 3293 .vendor = CPUID_VENDOR_INTEL, 3294 .family = 6, 3295 .model = 126, 3296 .stepping = 0, 3297 .features[FEAT_1_EDX] = 3298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3302 CPUID_DE | CPUID_FP87, 3303 .features[FEAT_1_ECX] = 3304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3310 .features[FEAT_8000_0001_EDX] = 3311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3312 CPUID_EXT2_SYSCALL, 3313 .features[FEAT_8000_0001_ECX] = 3314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3315 .features[FEAT_8000_0008_EBX] = 3316 CPUID_8000_0008_EBX_WBNOINVD, 3317 .features[FEAT_7_0_EBX] = 3318 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3319 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3320 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3321 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3322 CPUID_7_0_EBX_SMAP, 3323 .features[FEAT_7_0_ECX] = 3324 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3325 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3326 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3327 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3328 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3329 .features[FEAT_7_0_EDX] = 3330 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3331 /* Missing: XSAVES (not supported by some Linux versions, 3332 * including v4.1 to v4.12). 3333 * KVM doesn't yet expose any XSAVES state save component, 3334 * and the only one defined in Skylake (processor tracing) 3335 * probably will block migration anyway. 3336 */ 3337 .features[FEAT_XSAVE] = 3338 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3339 CPUID_XSAVE_XGETBV1, 3340 .features[FEAT_6_EAX] = 3341 CPUID_6_EAX_ARAT, 3342 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3343 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3344 MSR_VMX_BASIC_TRUE_CTLS, 3345 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3346 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3347 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3348 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3349 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3350 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3351 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3352 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3353 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3354 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3355 .features[FEAT_VMX_EXIT_CTLS] = 3356 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3357 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3358 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3359 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3360 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3361 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3362 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3363 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3364 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3365 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3366 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3367 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3368 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3369 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3370 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3371 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3372 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3373 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3374 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3375 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3376 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3377 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3378 .features[FEAT_VMX_SECONDARY_CTLS] = 3379 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3380 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3381 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3382 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3383 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3384 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3385 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3386 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3387 .xlevel = 0x80000008, 3388 .model_id = "Intel Core Processor (Icelake)", 3389 .versions = (X86CPUVersionDefinition[]) { 3390 { 3391 .version = 1, 3392 .note = "deprecated" 3393 }, 3394 { 3395 .version = 2, 3396 .note = "no TSX, deprecated", 3397 .alias = "Icelake-Client-noTSX", 3398 .props = (PropValue[]) { 3399 { "hle", "off" }, 3400 { "rtm", "off" }, 3401 { /* end of list */ } 3402 }, 3403 }, 3404 { /* end of list */ } 3405 }, 3406 .deprecation_note = "use Icelake-Server instead" 3407 }, 3408 { 3409 .name = "Icelake-Server", 3410 .level = 0xd, 3411 .vendor = CPUID_VENDOR_INTEL, 3412 .family = 6, 3413 .model = 134, 3414 .stepping = 0, 3415 .features[FEAT_1_EDX] = 3416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3420 CPUID_DE | CPUID_FP87, 3421 .features[FEAT_1_ECX] = 3422 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3423 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3425 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3426 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3427 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3428 .features[FEAT_8000_0001_EDX] = 3429 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3430 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3431 .features[FEAT_8000_0001_ECX] = 3432 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3433 .features[FEAT_8000_0008_EBX] = 3434 CPUID_8000_0008_EBX_WBNOINVD, 3435 .features[FEAT_7_0_EBX] = 3436 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3437 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3438 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3439 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3440 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3441 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3442 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3443 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3444 .features[FEAT_7_0_ECX] = 3445 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3446 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3447 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3448 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3449 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3450 .features[FEAT_7_0_EDX] = 3451 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3452 /* Missing: XSAVES (not supported by some Linux versions, 3453 * including v4.1 to v4.12). 3454 * KVM doesn't yet expose any XSAVES state save component, 3455 * and the only one defined in Skylake (processor tracing) 3456 * probably will block migration anyway. 3457 */ 3458 .features[FEAT_XSAVE] = 3459 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3460 CPUID_XSAVE_XGETBV1, 3461 .features[FEAT_6_EAX] = 3462 CPUID_6_EAX_ARAT, 3463 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3464 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3465 MSR_VMX_BASIC_TRUE_CTLS, 3466 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3467 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3468 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3469 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3470 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3471 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3472 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3473 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3474 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3475 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3476 .features[FEAT_VMX_EXIT_CTLS] = 3477 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3478 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3479 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3480 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3481 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3482 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3483 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3484 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3485 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3486 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3487 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3488 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3489 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3490 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3491 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3492 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3493 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3494 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3495 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3496 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3497 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3498 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3499 .features[FEAT_VMX_SECONDARY_CTLS] = 3500 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3501 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3502 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3503 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3504 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3505 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3506 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3507 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3508 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3509 .xlevel = 0x80000008, 3510 .model_id = "Intel Xeon Processor (Icelake)", 3511 .versions = (X86CPUVersionDefinition[]) { 3512 { .version = 1 }, 3513 { 3514 .version = 2, 3515 .note = "no TSX", 3516 .alias = "Icelake-Server-noTSX", 3517 .props = (PropValue[]) { 3518 { "hle", "off" }, 3519 { "rtm", "off" }, 3520 { /* end of list */ } 3521 }, 3522 }, 3523 { 3524 .version = 3, 3525 .props = (PropValue[]) { 3526 { "arch-capabilities", "on" }, 3527 { "rdctl-no", "on" }, 3528 { "ibrs-all", "on" }, 3529 { "skip-l1dfl-vmentry", "on" }, 3530 { "mds-no", "on" }, 3531 { "pschange-mc-no", "on" }, 3532 { "taa-no", "on" }, 3533 { /* end of list */ } 3534 }, 3535 }, 3536 { 3537 .version = 4, 3538 .props = (PropValue[]) { 3539 { "sha-ni", "on" }, 3540 { "avx512ifma", "on" }, 3541 { "rdpid", "on" }, 3542 { "fsrm", "on" }, 3543 { "vmx-rdseed-exit", "on" }, 3544 { "vmx-pml", "on" }, 3545 { "vmx-eptp-switching", "on" }, 3546 { "model", "106" }, 3547 { /* end of list */ } 3548 }, 3549 }, 3550 { /* end of list */ } 3551 } 3552 }, 3553 { 3554 .name = "Denverton", 3555 .level = 21, 3556 .vendor = CPUID_VENDOR_INTEL, 3557 .family = 6, 3558 .model = 95, 3559 .stepping = 1, 3560 .features[FEAT_1_EDX] = 3561 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3562 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3563 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3564 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3565 CPUID_SSE | CPUID_SSE2, 3566 .features[FEAT_1_ECX] = 3567 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3568 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3569 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3570 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3571 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3572 .features[FEAT_8000_0001_EDX] = 3573 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3574 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3575 .features[FEAT_8000_0001_ECX] = 3576 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3577 .features[FEAT_7_0_EBX] = 3578 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3579 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3580 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3581 .features[FEAT_7_0_EDX] = 3582 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3583 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3584 /* 3585 * Missing: XSAVES (not supported by some Linux versions, 3586 * including v4.1 to v4.12). 3587 * KVM doesn't yet expose any XSAVES state save component, 3588 * and the only one defined in Skylake (processor tracing) 3589 * probably will block migration anyway. 3590 */ 3591 .features[FEAT_XSAVE] = 3592 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3593 .features[FEAT_6_EAX] = 3594 CPUID_6_EAX_ARAT, 3595 .features[FEAT_ARCH_CAPABILITIES] = 3596 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3597 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3598 MSR_VMX_BASIC_TRUE_CTLS, 3599 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3600 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3601 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3602 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3603 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3604 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3605 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3606 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3607 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3608 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3609 .features[FEAT_VMX_EXIT_CTLS] = 3610 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3611 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3612 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3613 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3614 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3615 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3616 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3617 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3618 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3619 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3620 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3621 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3622 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3623 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3624 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3625 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3626 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3627 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3628 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3629 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3630 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3631 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3632 .features[FEAT_VMX_SECONDARY_CTLS] = 3633 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3634 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3635 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3636 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3637 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3638 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3639 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3640 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3641 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3642 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3643 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3644 .xlevel = 0x80000008, 3645 .model_id = "Intel Atom Processor (Denverton)", 3646 .versions = (X86CPUVersionDefinition[]) { 3647 { .version = 1 }, 3648 { 3649 .version = 2, 3650 .note = "no MPX, no MONITOR", 3651 .props = (PropValue[]) { 3652 { "monitor", "off" }, 3653 { "mpx", "off" }, 3654 { /* end of list */ }, 3655 }, 3656 }, 3657 { /* end of list */ }, 3658 }, 3659 }, 3660 { 3661 .name = "Snowridge", 3662 .level = 27, 3663 .vendor = CPUID_VENDOR_INTEL, 3664 .family = 6, 3665 .model = 134, 3666 .stepping = 1, 3667 .features[FEAT_1_EDX] = 3668 /* missing: CPUID_PN CPUID_IA64 */ 3669 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3670 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3671 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3672 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3673 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3674 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3675 CPUID_MMX | 3676 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3677 .features[FEAT_1_ECX] = 3678 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3679 CPUID_EXT_SSSE3 | 3680 CPUID_EXT_CX16 | 3681 CPUID_EXT_SSE41 | 3682 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3683 CPUID_EXT_POPCNT | 3684 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3685 CPUID_EXT_RDRAND, 3686 .features[FEAT_8000_0001_EDX] = 3687 CPUID_EXT2_SYSCALL | 3688 CPUID_EXT2_NX | 3689 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3690 CPUID_EXT2_LM, 3691 .features[FEAT_8000_0001_ECX] = 3692 CPUID_EXT3_LAHF_LM | 3693 CPUID_EXT3_3DNOWPREFETCH, 3694 .features[FEAT_7_0_EBX] = 3695 CPUID_7_0_EBX_FSGSBASE | 3696 CPUID_7_0_EBX_SMEP | 3697 CPUID_7_0_EBX_ERMS | 3698 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3699 CPUID_7_0_EBX_RDSEED | 3700 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3701 CPUID_7_0_EBX_CLWB | 3702 CPUID_7_0_EBX_SHA_NI, 3703 .features[FEAT_7_0_ECX] = 3704 CPUID_7_0_ECX_UMIP | 3705 /* missing bit 5 */ 3706 CPUID_7_0_ECX_GFNI | 3707 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3708 CPUID_7_0_ECX_MOVDIR64B, 3709 .features[FEAT_7_0_EDX] = 3710 CPUID_7_0_EDX_SPEC_CTRL | 3711 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3712 CPUID_7_0_EDX_CORE_CAPABILITY, 3713 .features[FEAT_CORE_CAPABILITY] = 3714 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3715 /* 3716 * Missing: XSAVES (not supported by some Linux versions, 3717 * including v4.1 to v4.12). 3718 * KVM doesn't yet expose any XSAVES state save component, 3719 * and the only one defined in Skylake (processor tracing) 3720 * probably will block migration anyway. 3721 */ 3722 .features[FEAT_XSAVE] = 3723 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3724 CPUID_XSAVE_XGETBV1, 3725 .features[FEAT_6_EAX] = 3726 CPUID_6_EAX_ARAT, 3727 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3728 MSR_VMX_BASIC_TRUE_CTLS, 3729 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3730 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3731 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3732 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3733 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3734 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3735 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3736 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3737 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3738 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3739 .features[FEAT_VMX_EXIT_CTLS] = 3740 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3741 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3742 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3743 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3744 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3745 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3746 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3747 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3748 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3749 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3750 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3751 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3752 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3753 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3754 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3755 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3756 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3757 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3758 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3759 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3760 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3761 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3762 .features[FEAT_VMX_SECONDARY_CTLS] = 3763 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3764 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3765 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3766 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3767 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3768 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3769 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3770 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3771 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3772 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3773 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3774 .xlevel = 0x80000008, 3775 .model_id = "Intel Atom Processor (SnowRidge)", 3776 .versions = (X86CPUVersionDefinition[]) { 3777 { .version = 1 }, 3778 { 3779 .version = 2, 3780 .props = (PropValue[]) { 3781 { "mpx", "off" }, 3782 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3783 { /* end of list */ }, 3784 }, 3785 }, 3786 { /* end of list */ }, 3787 }, 3788 }, 3789 { 3790 .name = "KnightsMill", 3791 .level = 0xd, 3792 .vendor = CPUID_VENDOR_INTEL, 3793 .family = 6, 3794 .model = 133, 3795 .stepping = 0, 3796 .features[FEAT_1_EDX] = 3797 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3798 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3799 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3800 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3801 CPUID_PSE | CPUID_DE | CPUID_FP87, 3802 .features[FEAT_1_ECX] = 3803 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3804 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3805 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3806 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3807 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3808 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3809 .features[FEAT_8000_0001_EDX] = 3810 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3811 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3812 .features[FEAT_8000_0001_ECX] = 3813 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3814 .features[FEAT_7_0_EBX] = 3815 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3816 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3817 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3818 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3819 CPUID_7_0_EBX_AVX512ER, 3820 .features[FEAT_7_0_ECX] = 3821 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3822 .features[FEAT_7_0_EDX] = 3823 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3824 .features[FEAT_XSAVE] = 3825 CPUID_XSAVE_XSAVEOPT, 3826 .features[FEAT_6_EAX] = 3827 CPUID_6_EAX_ARAT, 3828 .xlevel = 0x80000008, 3829 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3830 }, 3831 { 3832 .name = "Opteron_G1", 3833 .level = 5, 3834 .vendor = CPUID_VENDOR_AMD, 3835 .family = 15, 3836 .model = 6, 3837 .stepping = 1, 3838 .features[FEAT_1_EDX] = 3839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3843 CPUID_DE | CPUID_FP87, 3844 .features[FEAT_1_ECX] = 3845 CPUID_EXT_SSE3, 3846 .features[FEAT_8000_0001_EDX] = 3847 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3848 .xlevel = 0x80000008, 3849 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3850 }, 3851 { 3852 .name = "Opteron_G2", 3853 .level = 5, 3854 .vendor = CPUID_VENDOR_AMD, 3855 .family = 15, 3856 .model = 6, 3857 .stepping = 1, 3858 .features[FEAT_1_EDX] = 3859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3863 CPUID_DE | CPUID_FP87, 3864 .features[FEAT_1_ECX] = 3865 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3866 .features[FEAT_8000_0001_EDX] = 3867 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3868 .features[FEAT_8000_0001_ECX] = 3869 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3870 .xlevel = 0x80000008, 3871 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3872 }, 3873 { 3874 .name = "Opteron_G3", 3875 .level = 5, 3876 .vendor = CPUID_VENDOR_AMD, 3877 .family = 16, 3878 .model = 2, 3879 .stepping = 3, 3880 .features[FEAT_1_EDX] = 3881 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3882 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3883 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3884 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3885 CPUID_DE | CPUID_FP87, 3886 .features[FEAT_1_ECX] = 3887 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3888 CPUID_EXT_SSE3, 3889 .features[FEAT_8000_0001_EDX] = 3890 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3891 CPUID_EXT2_RDTSCP, 3892 .features[FEAT_8000_0001_ECX] = 3893 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3894 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3895 .xlevel = 0x80000008, 3896 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3897 }, 3898 { 3899 .name = "Opteron_G4", 3900 .level = 0xd, 3901 .vendor = CPUID_VENDOR_AMD, 3902 .family = 21, 3903 .model = 1, 3904 .stepping = 2, 3905 .features[FEAT_1_EDX] = 3906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3910 CPUID_DE | CPUID_FP87, 3911 .features[FEAT_1_ECX] = 3912 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3913 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3914 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3915 CPUID_EXT_SSE3, 3916 .features[FEAT_8000_0001_EDX] = 3917 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3918 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3919 .features[FEAT_8000_0001_ECX] = 3920 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3921 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3922 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3923 CPUID_EXT3_LAHF_LM, 3924 .features[FEAT_SVM] = 3925 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3926 /* no xsaveopt! */ 3927 .xlevel = 0x8000001A, 3928 .model_id = "AMD Opteron 62xx class CPU", 3929 }, 3930 { 3931 .name = "Opteron_G5", 3932 .level = 0xd, 3933 .vendor = CPUID_VENDOR_AMD, 3934 .family = 21, 3935 .model = 2, 3936 .stepping = 0, 3937 .features[FEAT_1_EDX] = 3938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3942 CPUID_DE | CPUID_FP87, 3943 .features[FEAT_1_ECX] = 3944 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3945 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3946 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3947 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3948 .features[FEAT_8000_0001_EDX] = 3949 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3950 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3951 .features[FEAT_8000_0001_ECX] = 3952 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3953 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3954 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3955 CPUID_EXT3_LAHF_LM, 3956 .features[FEAT_SVM] = 3957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3958 /* no xsaveopt! */ 3959 .xlevel = 0x8000001A, 3960 .model_id = "AMD Opteron 63xx class CPU", 3961 }, 3962 { 3963 .name = "EPYC", 3964 .level = 0xd, 3965 .vendor = CPUID_VENDOR_AMD, 3966 .family = 23, 3967 .model = 1, 3968 .stepping = 2, 3969 .features[FEAT_1_EDX] = 3970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3974 CPUID_VME | CPUID_FP87, 3975 .features[FEAT_1_ECX] = 3976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3977 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3980 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3981 .features[FEAT_8000_0001_EDX] = 3982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3984 CPUID_EXT2_SYSCALL, 3985 .features[FEAT_8000_0001_ECX] = 3986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3989 CPUID_EXT3_TOPOEXT, 3990 .features[FEAT_7_0_EBX] = 3991 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3992 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3993 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3994 CPUID_7_0_EBX_SHA_NI, 3995 .features[FEAT_XSAVE] = 3996 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3997 CPUID_XSAVE_XGETBV1, 3998 .features[FEAT_6_EAX] = 3999 CPUID_6_EAX_ARAT, 4000 .features[FEAT_SVM] = 4001 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4002 .xlevel = 0x8000001E, 4003 .model_id = "AMD EPYC Processor", 4004 .cache_info = &epyc_cache_info, 4005 .versions = (X86CPUVersionDefinition[]) { 4006 { .version = 1 }, 4007 { 4008 .version = 2, 4009 .alias = "EPYC-IBPB", 4010 .props = (PropValue[]) { 4011 { "ibpb", "on" }, 4012 { "model-id", 4013 "AMD EPYC Processor (with IBPB)" }, 4014 { /* end of list */ } 4015 } 4016 }, 4017 { 4018 .version = 3, 4019 .props = (PropValue[]) { 4020 { "ibpb", "on" }, 4021 { "perfctr-core", "on" }, 4022 { "clzero", "on" }, 4023 { "xsaveerptr", "on" }, 4024 { "xsaves", "on" }, 4025 { "model-id", 4026 "AMD EPYC Processor" }, 4027 { /* end of list */ } 4028 } 4029 }, 4030 { /* end of list */ } 4031 } 4032 }, 4033 { 4034 .name = "Dhyana", 4035 .level = 0xd, 4036 .vendor = CPUID_VENDOR_HYGON, 4037 .family = 24, 4038 .model = 0, 4039 .stepping = 1, 4040 .features[FEAT_1_EDX] = 4041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4045 CPUID_VME | CPUID_FP87, 4046 .features[FEAT_1_ECX] = 4047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4052 .features[FEAT_8000_0001_EDX] = 4053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4055 CPUID_EXT2_SYSCALL, 4056 .features[FEAT_8000_0001_ECX] = 4057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4058 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4060 CPUID_EXT3_TOPOEXT, 4061 .features[FEAT_8000_0008_EBX] = 4062 CPUID_8000_0008_EBX_IBPB, 4063 .features[FEAT_7_0_EBX] = 4064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4067 /* 4068 * Missing: XSAVES (not supported by some Linux versions, 4069 * including v4.1 to v4.12). 4070 * KVM doesn't yet expose any XSAVES state save component. 4071 */ 4072 .features[FEAT_XSAVE] = 4073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4074 CPUID_XSAVE_XGETBV1, 4075 .features[FEAT_6_EAX] = 4076 CPUID_6_EAX_ARAT, 4077 .features[FEAT_SVM] = 4078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4079 .xlevel = 0x8000001E, 4080 .model_id = "Hygon Dhyana Processor", 4081 .cache_info = &epyc_cache_info, 4082 }, 4083 { 4084 .name = "EPYC-Rome", 4085 .level = 0xd, 4086 .vendor = CPUID_VENDOR_AMD, 4087 .family = 23, 4088 .model = 49, 4089 .stepping = 0, 4090 .features[FEAT_1_EDX] = 4091 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4092 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4093 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4094 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4095 CPUID_VME | CPUID_FP87, 4096 .features[FEAT_1_ECX] = 4097 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4098 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4099 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4100 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4101 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4102 .features[FEAT_8000_0001_EDX] = 4103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4104 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4105 CPUID_EXT2_SYSCALL, 4106 .features[FEAT_8000_0001_ECX] = 4107 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4108 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4109 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4110 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4111 .features[FEAT_8000_0008_EBX] = 4112 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4113 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4114 CPUID_8000_0008_EBX_STIBP, 4115 .features[FEAT_7_0_EBX] = 4116 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4117 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4118 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4119 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4120 .features[FEAT_7_0_ECX] = 4121 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4122 .features[FEAT_XSAVE] = 4123 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4124 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4125 .features[FEAT_6_EAX] = 4126 CPUID_6_EAX_ARAT, 4127 .features[FEAT_SVM] = 4128 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4129 .xlevel = 0x8000001E, 4130 .model_id = "AMD EPYC-Rome Processor", 4131 .cache_info = &epyc_rome_cache_info, 4132 }, 4133 }; 4134 4135 /* KVM-specific features that are automatically added/removed 4136 * from all CPU models when KVM is enabled. 4137 */ 4138 static PropValue kvm_default_props[] = { 4139 { "kvmclock", "on" }, 4140 { "kvm-nopiodelay", "on" }, 4141 { "kvm-asyncpf", "on" }, 4142 { "kvm-steal-time", "on" }, 4143 { "kvm-pv-eoi", "on" }, 4144 { "kvmclock-stable-bit", "on" }, 4145 { "x2apic", "on" }, 4146 { "kvm-msi-ext-dest-id", "off" }, 4147 { "acpi", "off" }, 4148 { "monitor", "off" }, 4149 { "svm", "off" }, 4150 { NULL, NULL }, 4151 }; 4152 4153 /* TCG-specific defaults that override all CPU models when using TCG 4154 */ 4155 static PropValue tcg_default_props[] = { 4156 { "vme", "off" }, 4157 { NULL, NULL }, 4158 }; 4159 4160 4161 /* 4162 * We resolve CPU model aliases using -v1 when using "-machine 4163 * none", but this is just for compatibility while libvirt isn't 4164 * adapted to resolve CPU model versions before creating VMs. 4165 * See "Runnability guarantee of CPU models" at 4166 * docs/system/deprecated.rst. 4167 */ 4168 X86CPUVersion default_cpu_version = 1; 4169 4170 void x86_cpu_set_default_version(X86CPUVersion version) 4171 { 4172 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4173 assert(version != CPU_VERSION_AUTO); 4174 default_cpu_version = version; 4175 } 4176 4177 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4178 { 4179 int v = 0; 4180 const X86CPUVersionDefinition *vdef = 4181 x86_cpu_def_get_versions(model->cpudef); 4182 while (vdef->version) { 4183 v = vdef->version; 4184 vdef++; 4185 } 4186 return v; 4187 } 4188 4189 /* Return the actual version being used for a specific CPU model */ 4190 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4191 { 4192 X86CPUVersion v = model->version; 4193 if (v == CPU_VERSION_AUTO) { 4194 v = default_cpu_version; 4195 } 4196 if (v == CPU_VERSION_LATEST) { 4197 return x86_cpu_model_last_version(model); 4198 } 4199 return v; 4200 } 4201 4202 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4203 { 4204 PropValue *pv; 4205 for (pv = kvm_default_props; pv->prop; pv++) { 4206 if (!strcmp(pv->prop, prop)) { 4207 pv->value = value; 4208 break; 4209 } 4210 } 4211 4212 /* It is valid to call this function only for properties that 4213 * are already present in the kvm_default_props table. 4214 */ 4215 assert(pv->prop); 4216 } 4217 4218 static bool lmce_supported(void) 4219 { 4220 uint64_t mce_cap = 0; 4221 4222 #ifdef CONFIG_KVM 4223 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4224 return false; 4225 } 4226 #endif 4227 4228 return !!(mce_cap & MCG_LMCE_P); 4229 } 4230 4231 #define CPUID_MODEL_ID_SZ 48 4232 4233 /** 4234 * cpu_x86_fill_model_id: 4235 * Get CPUID model ID string from host CPU. 4236 * 4237 * @str should have at least CPUID_MODEL_ID_SZ bytes 4238 * 4239 * The function does NOT add a null terminator to the string 4240 * automatically. 4241 */ 4242 static int cpu_x86_fill_model_id(char *str) 4243 { 4244 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4245 int i; 4246 4247 for (i = 0; i < 3; i++) { 4248 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4249 memcpy(str + i * 16 + 0, &eax, 4); 4250 memcpy(str + i * 16 + 4, &ebx, 4); 4251 memcpy(str + i * 16 + 8, &ecx, 4); 4252 memcpy(str + i * 16 + 12, &edx, 4); 4253 } 4254 return 0; 4255 } 4256 4257 static Property max_x86_cpu_properties[] = { 4258 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4259 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4260 DEFINE_PROP_END_OF_LIST() 4261 }; 4262 4263 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4264 { 4265 DeviceClass *dc = DEVICE_CLASS(oc); 4266 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4267 4268 xcc->ordering = 9; 4269 4270 xcc->model_description = 4271 "Enables all features supported by the accelerator in the current host"; 4272 4273 device_class_set_props(dc, max_x86_cpu_properties); 4274 } 4275 4276 static void max_x86_cpu_initfn(Object *obj) 4277 { 4278 X86CPU *cpu = X86_CPU(obj); 4279 CPUX86State *env = &cpu->env; 4280 KVMState *s = kvm_state; 4281 4282 /* We can't fill the features array here because we don't know yet if 4283 * "migratable" is true or false. 4284 */ 4285 cpu->max_features = true; 4286 4287 if (accel_uses_host_cpuid()) { 4288 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4289 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4290 int family, model, stepping; 4291 4292 host_vendor_fms(vendor, &family, &model, &stepping); 4293 cpu_x86_fill_model_id(model_id); 4294 4295 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 4296 object_property_set_int(OBJECT(cpu), "family", family, &error_abort); 4297 object_property_set_int(OBJECT(cpu), "model", model, &error_abort); 4298 object_property_set_int(OBJECT(cpu), "stepping", stepping, 4299 &error_abort); 4300 object_property_set_str(OBJECT(cpu), "model-id", model_id, 4301 &error_abort); 4302 4303 if (kvm_enabled()) { 4304 env->cpuid_min_level = 4305 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4306 env->cpuid_min_xlevel = 4307 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4308 env->cpuid_min_xlevel2 = 4309 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4310 } else { 4311 env->cpuid_min_level = 4312 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4313 env->cpuid_min_xlevel = 4314 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4315 env->cpuid_min_xlevel2 = 4316 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4317 } 4318 4319 if (lmce_supported()) { 4320 object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); 4321 } 4322 object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort); 4323 } else { 4324 object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, 4325 &error_abort); 4326 object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); 4327 object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); 4328 object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); 4329 object_property_set_str(OBJECT(cpu), "model-id", 4330 "QEMU TCG CPU version " QEMU_HW_VERSION, 4331 &error_abort); 4332 } 4333 4334 object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); 4335 } 4336 4337 static const TypeInfo max_x86_cpu_type_info = { 4338 .name = X86_CPU_TYPE_NAME("max"), 4339 .parent = TYPE_X86_CPU, 4340 .instance_init = max_x86_cpu_initfn, 4341 .class_init = max_x86_cpu_class_init, 4342 }; 4343 4344 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4345 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4346 { 4347 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4348 4349 xcc->host_cpuid_required = true; 4350 xcc->ordering = 8; 4351 4352 #if defined(CONFIG_KVM) 4353 xcc->model_description = 4354 "KVM processor with all supported host features "; 4355 #elif defined(CONFIG_HVF) 4356 xcc->model_description = 4357 "HVF processor with all supported host features "; 4358 #endif 4359 } 4360 4361 static const TypeInfo host_x86_cpu_type_info = { 4362 .name = X86_CPU_TYPE_NAME("host"), 4363 .parent = X86_CPU_TYPE_NAME("max"), 4364 .class_init = host_x86_cpu_class_init, 4365 }; 4366 4367 #endif 4368 4369 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4370 { 4371 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4372 4373 switch (f->type) { 4374 case CPUID_FEATURE_WORD: 4375 { 4376 const char *reg = get_register_name_32(f->cpuid.reg); 4377 assert(reg); 4378 return g_strdup_printf("CPUID.%02XH:%s", 4379 f->cpuid.eax, reg); 4380 } 4381 case MSR_FEATURE_WORD: 4382 return g_strdup_printf("MSR(%02XH)", 4383 f->msr.index); 4384 } 4385 4386 return NULL; 4387 } 4388 4389 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4390 { 4391 FeatureWord w; 4392 4393 for (w = 0; w < FEATURE_WORDS; w++) { 4394 if (cpu->filtered_features[w]) { 4395 return true; 4396 } 4397 } 4398 4399 return false; 4400 } 4401 4402 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4403 const char *verbose_prefix) 4404 { 4405 CPUX86State *env = &cpu->env; 4406 FeatureWordInfo *f = &feature_word_info[w]; 4407 int i; 4408 4409 if (!cpu->force_features) { 4410 env->features[w] &= ~mask; 4411 } 4412 cpu->filtered_features[w] |= mask; 4413 4414 if (!verbose_prefix) { 4415 return; 4416 } 4417 4418 for (i = 0; i < 64; ++i) { 4419 if ((1ULL << i) & mask) { 4420 g_autofree char *feat_word_str = feature_word_description(f, i); 4421 warn_report("%s: %s%s%s [bit %d]", 4422 verbose_prefix, 4423 feat_word_str, 4424 f->feat_names[i] ? "." : "", 4425 f->feat_names[i] ? f->feat_names[i] : "", i); 4426 } 4427 } 4428 } 4429 4430 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4431 const char *name, void *opaque, 4432 Error **errp) 4433 { 4434 X86CPU *cpu = X86_CPU(obj); 4435 CPUX86State *env = &cpu->env; 4436 int64_t value; 4437 4438 value = (env->cpuid_version >> 8) & 0xf; 4439 if (value == 0xf) { 4440 value += (env->cpuid_version >> 20) & 0xff; 4441 } 4442 visit_type_int(v, name, &value, errp); 4443 } 4444 4445 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4446 const char *name, void *opaque, 4447 Error **errp) 4448 { 4449 X86CPU *cpu = X86_CPU(obj); 4450 CPUX86State *env = &cpu->env; 4451 const int64_t min = 0; 4452 const int64_t max = 0xff + 0xf; 4453 int64_t value; 4454 4455 if (!visit_type_int(v, name, &value, errp)) { 4456 return; 4457 } 4458 if (value < min || value > max) { 4459 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4460 name ? name : "null", value, min, max); 4461 return; 4462 } 4463 4464 env->cpuid_version &= ~0xff00f00; 4465 if (value > 0x0f) { 4466 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4467 } else { 4468 env->cpuid_version |= value << 8; 4469 } 4470 } 4471 4472 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4473 const char *name, void *opaque, 4474 Error **errp) 4475 { 4476 X86CPU *cpu = X86_CPU(obj); 4477 CPUX86State *env = &cpu->env; 4478 int64_t value; 4479 4480 value = (env->cpuid_version >> 4) & 0xf; 4481 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4482 visit_type_int(v, name, &value, errp); 4483 } 4484 4485 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4486 const char *name, void *opaque, 4487 Error **errp) 4488 { 4489 X86CPU *cpu = X86_CPU(obj); 4490 CPUX86State *env = &cpu->env; 4491 const int64_t min = 0; 4492 const int64_t max = 0xff; 4493 int64_t value; 4494 4495 if (!visit_type_int(v, name, &value, errp)) { 4496 return; 4497 } 4498 if (value < min || value > max) { 4499 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4500 name ? name : "null", value, min, max); 4501 return; 4502 } 4503 4504 env->cpuid_version &= ~0xf00f0; 4505 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4506 } 4507 4508 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4509 const char *name, void *opaque, 4510 Error **errp) 4511 { 4512 X86CPU *cpu = X86_CPU(obj); 4513 CPUX86State *env = &cpu->env; 4514 int64_t value; 4515 4516 value = env->cpuid_version & 0xf; 4517 visit_type_int(v, name, &value, errp); 4518 } 4519 4520 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4521 const char *name, void *opaque, 4522 Error **errp) 4523 { 4524 X86CPU *cpu = X86_CPU(obj); 4525 CPUX86State *env = &cpu->env; 4526 const int64_t min = 0; 4527 const int64_t max = 0xf; 4528 int64_t value; 4529 4530 if (!visit_type_int(v, name, &value, errp)) { 4531 return; 4532 } 4533 if (value < min || value > max) { 4534 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4535 name ? name : "null", value, min, max); 4536 return; 4537 } 4538 4539 env->cpuid_version &= ~0xf; 4540 env->cpuid_version |= value & 0xf; 4541 } 4542 4543 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4544 { 4545 X86CPU *cpu = X86_CPU(obj); 4546 CPUX86State *env = &cpu->env; 4547 char *value; 4548 4549 value = g_malloc(CPUID_VENDOR_SZ + 1); 4550 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4551 env->cpuid_vendor3); 4552 return value; 4553 } 4554 4555 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4556 Error **errp) 4557 { 4558 X86CPU *cpu = X86_CPU(obj); 4559 CPUX86State *env = &cpu->env; 4560 int i; 4561 4562 if (strlen(value) != CPUID_VENDOR_SZ) { 4563 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4564 return; 4565 } 4566 4567 env->cpuid_vendor1 = 0; 4568 env->cpuid_vendor2 = 0; 4569 env->cpuid_vendor3 = 0; 4570 for (i = 0; i < 4; i++) { 4571 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4572 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4573 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4574 } 4575 } 4576 4577 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4578 { 4579 X86CPU *cpu = X86_CPU(obj); 4580 CPUX86State *env = &cpu->env; 4581 char *value; 4582 int i; 4583 4584 value = g_malloc(48 + 1); 4585 for (i = 0; i < 48; i++) { 4586 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4587 } 4588 value[48] = '\0'; 4589 return value; 4590 } 4591 4592 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4593 Error **errp) 4594 { 4595 X86CPU *cpu = X86_CPU(obj); 4596 CPUX86State *env = &cpu->env; 4597 int c, len, i; 4598 4599 if (model_id == NULL) { 4600 model_id = ""; 4601 } 4602 len = strlen(model_id); 4603 memset(env->cpuid_model, 0, 48); 4604 for (i = 0; i < 48; i++) { 4605 if (i >= len) { 4606 c = '\0'; 4607 } else { 4608 c = (uint8_t)model_id[i]; 4609 } 4610 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4611 } 4612 } 4613 4614 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4615 void *opaque, Error **errp) 4616 { 4617 X86CPU *cpu = X86_CPU(obj); 4618 int64_t value; 4619 4620 value = cpu->env.tsc_khz * 1000; 4621 visit_type_int(v, name, &value, errp); 4622 } 4623 4624 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4625 void *opaque, Error **errp) 4626 { 4627 X86CPU *cpu = X86_CPU(obj); 4628 const int64_t min = 0; 4629 const int64_t max = INT64_MAX; 4630 int64_t value; 4631 4632 if (!visit_type_int(v, name, &value, errp)) { 4633 return; 4634 } 4635 if (value < min || value > max) { 4636 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4637 name ? name : "null", value, min, max); 4638 return; 4639 } 4640 4641 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4642 } 4643 4644 /* Generic getter for "feature-words" and "filtered-features" properties */ 4645 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4646 const char *name, void *opaque, 4647 Error **errp) 4648 { 4649 uint64_t *array = (uint64_t *)opaque; 4650 FeatureWord w; 4651 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4652 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4653 X86CPUFeatureWordInfoList *list = NULL; 4654 4655 for (w = 0; w < FEATURE_WORDS; w++) { 4656 FeatureWordInfo *wi = &feature_word_info[w]; 4657 /* 4658 * We didn't have MSR features when "feature-words" was 4659 * introduced. Therefore skipped other type entries. 4660 */ 4661 if (wi->type != CPUID_FEATURE_WORD) { 4662 continue; 4663 } 4664 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4665 qwi->cpuid_input_eax = wi->cpuid.eax; 4666 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4667 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4668 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4669 qwi->features = array[w]; 4670 4671 /* List will be in reverse order, but order shouldn't matter */ 4672 list_entries[w].next = list; 4673 list_entries[w].value = &word_infos[w]; 4674 list = &list_entries[w]; 4675 } 4676 4677 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4678 } 4679 4680 /* Convert all '_' in a feature string option name to '-', to make feature 4681 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4682 */ 4683 static inline void feat2prop(char *s) 4684 { 4685 while ((s = strchr(s, '_'))) { 4686 *s = '-'; 4687 } 4688 } 4689 4690 /* Return the feature property name for a feature flag bit */ 4691 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4692 { 4693 const char *name; 4694 /* XSAVE components are automatically enabled by other features, 4695 * so return the original feature name instead 4696 */ 4697 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4698 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4699 4700 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4701 x86_ext_save_areas[comp].bits) { 4702 w = x86_ext_save_areas[comp].feature; 4703 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4704 } 4705 } 4706 4707 assert(bitnr < 64); 4708 assert(w < FEATURE_WORDS); 4709 name = feature_word_info[w].feat_names[bitnr]; 4710 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4711 return name; 4712 } 4713 4714 /* Compatibily hack to maintain legacy +-feat semantic, 4715 * where +-feat overwrites any feature set by 4716 * feat=on|feat even if the later is parsed after +-feat 4717 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4718 */ 4719 static GList *plus_features, *minus_features; 4720 4721 static gint compare_string(gconstpointer a, gconstpointer b) 4722 { 4723 return g_strcmp0(a, b); 4724 } 4725 4726 /* Parse "+feature,-feature,feature=foo" CPU feature string 4727 */ 4728 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4729 Error **errp) 4730 { 4731 char *featurestr; /* Single 'key=value" string being parsed */ 4732 static bool cpu_globals_initialized; 4733 bool ambiguous = false; 4734 4735 if (cpu_globals_initialized) { 4736 return; 4737 } 4738 cpu_globals_initialized = true; 4739 4740 if (!features) { 4741 return; 4742 } 4743 4744 for (featurestr = strtok(features, ","); 4745 featurestr; 4746 featurestr = strtok(NULL, ",")) { 4747 const char *name; 4748 const char *val = NULL; 4749 char *eq = NULL; 4750 char num[32]; 4751 GlobalProperty *prop; 4752 4753 /* Compatibility syntax: */ 4754 if (featurestr[0] == '+') { 4755 plus_features = g_list_append(plus_features, 4756 g_strdup(featurestr + 1)); 4757 continue; 4758 } else if (featurestr[0] == '-') { 4759 minus_features = g_list_append(minus_features, 4760 g_strdup(featurestr + 1)); 4761 continue; 4762 } 4763 4764 eq = strchr(featurestr, '='); 4765 if (eq) { 4766 *eq++ = 0; 4767 val = eq; 4768 } else { 4769 val = "on"; 4770 } 4771 4772 feat2prop(featurestr); 4773 name = featurestr; 4774 4775 if (g_list_find_custom(plus_features, name, compare_string)) { 4776 warn_report("Ambiguous CPU model string. " 4777 "Don't mix both \"+%s\" and \"%s=%s\"", 4778 name, name, val); 4779 ambiguous = true; 4780 } 4781 if (g_list_find_custom(minus_features, name, compare_string)) { 4782 warn_report("Ambiguous CPU model string. " 4783 "Don't mix both \"-%s\" and \"%s=%s\"", 4784 name, name, val); 4785 ambiguous = true; 4786 } 4787 4788 /* Special case: */ 4789 if (!strcmp(name, "tsc-freq")) { 4790 int ret; 4791 uint64_t tsc_freq; 4792 4793 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4794 if (ret < 0 || tsc_freq > INT64_MAX) { 4795 error_setg(errp, "bad numerical value %s", val); 4796 return; 4797 } 4798 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4799 val = num; 4800 name = "tsc-frequency"; 4801 } 4802 4803 prop = g_new0(typeof(*prop), 1); 4804 prop->driver = typename; 4805 prop->property = g_strdup(name); 4806 prop->value = g_strdup(val); 4807 qdev_prop_register_global(prop); 4808 } 4809 4810 if (ambiguous) { 4811 warn_report("Compatibility of ambiguous CPU model " 4812 "strings won't be kept on future QEMU versions"); 4813 } 4814 } 4815 4816 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4817 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4818 4819 /* Build a list with the name of all features on a feature word array */ 4820 static void x86_cpu_list_feature_names(FeatureWordArray features, 4821 strList **list) 4822 { 4823 strList **tail = list; 4824 FeatureWord w; 4825 4826 for (w = 0; w < FEATURE_WORDS; w++) { 4827 uint64_t filtered = features[w]; 4828 int i; 4829 for (i = 0; i < 64; i++) { 4830 if (filtered & (1ULL << i)) { 4831 QAPI_LIST_APPEND(tail, g_strdup(x86_cpu_feature_name(w, i))); 4832 } 4833 } 4834 } 4835 } 4836 4837 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4838 const char *name, void *opaque, 4839 Error **errp) 4840 { 4841 X86CPU *xc = X86_CPU(obj); 4842 strList *result = NULL; 4843 4844 x86_cpu_list_feature_names(xc->filtered_features, &result); 4845 visit_type_strList(v, "unavailable-features", &result, errp); 4846 } 4847 4848 /* Check for missing features that may prevent the CPU class from 4849 * running using the current machine and accelerator. 4850 */ 4851 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4852 strList **list) 4853 { 4854 strList **tail = list; 4855 X86CPU *xc; 4856 Error *err = NULL; 4857 4858 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4859 QAPI_LIST_APPEND(tail, g_strdup("kvm")); 4860 return; 4861 } 4862 4863 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4864 4865 x86_cpu_expand_features(xc, &err); 4866 if (err) { 4867 /* Errors at x86_cpu_expand_features should never happen, 4868 * but in case it does, just report the model as not 4869 * runnable at all using the "type" property. 4870 */ 4871 QAPI_LIST_APPEND(tail, g_strdup("type")); 4872 error_free(err); 4873 } 4874 4875 x86_cpu_filter_features(xc, false); 4876 4877 x86_cpu_list_feature_names(xc->filtered_features, tail); 4878 4879 object_unref(OBJECT(xc)); 4880 } 4881 4882 /* Print all cpuid feature names in featureset 4883 */ 4884 static void listflags(GList *features) 4885 { 4886 size_t len = 0; 4887 GList *tmp; 4888 4889 for (tmp = features; tmp; tmp = tmp->next) { 4890 const char *name = tmp->data; 4891 if ((len + strlen(name) + 1) >= 75) { 4892 qemu_printf("\n"); 4893 len = 0; 4894 } 4895 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4896 len += strlen(name) + 1; 4897 } 4898 qemu_printf("\n"); 4899 } 4900 4901 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4902 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4903 { 4904 ObjectClass *class_a = (ObjectClass *)a; 4905 ObjectClass *class_b = (ObjectClass *)b; 4906 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4907 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4908 int ret; 4909 4910 if (cc_a->ordering != cc_b->ordering) { 4911 ret = cc_a->ordering - cc_b->ordering; 4912 } else { 4913 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4914 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4915 ret = strcmp(name_a, name_b); 4916 } 4917 return ret; 4918 } 4919 4920 static GSList *get_sorted_cpu_model_list(void) 4921 { 4922 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4923 list = g_slist_sort(list, x86_cpu_list_compare); 4924 return list; 4925 } 4926 4927 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4928 { 4929 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4930 char *r = object_property_get_str(obj, "model-id", &error_abort); 4931 object_unref(obj); 4932 return r; 4933 } 4934 4935 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4936 { 4937 X86CPUVersion version; 4938 4939 if (!cc->model || !cc->model->is_alias) { 4940 return NULL; 4941 } 4942 version = x86_cpu_model_resolve_version(cc->model); 4943 if (version <= 0) { 4944 return NULL; 4945 } 4946 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4947 } 4948 4949 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4950 { 4951 ObjectClass *oc = data; 4952 X86CPUClass *cc = X86_CPU_CLASS(oc); 4953 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4954 g_autofree char *desc = g_strdup(cc->model_description); 4955 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4956 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4957 4958 if (!desc && alias_of) { 4959 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4960 desc = g_strdup("(alias configured by machine type)"); 4961 } else { 4962 desc = g_strdup_printf("(alias of %s)", alias_of); 4963 } 4964 } 4965 if (!desc && cc->model && cc->model->note) { 4966 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4967 } 4968 if (!desc) { 4969 desc = g_strdup_printf("%s", model_id); 4970 } 4971 4972 qemu_printf("x86 %-20s %-58s\n", name, desc); 4973 } 4974 4975 /* list available CPU models and flags */ 4976 void x86_cpu_list(void) 4977 { 4978 int i, j; 4979 GSList *list; 4980 GList *names = NULL; 4981 4982 qemu_printf("Available CPUs:\n"); 4983 list = get_sorted_cpu_model_list(); 4984 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4985 g_slist_free(list); 4986 4987 names = NULL; 4988 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 4989 FeatureWordInfo *fw = &feature_word_info[i]; 4990 for (j = 0; j < 64; j++) { 4991 if (fw->feat_names[j]) { 4992 names = g_list_append(names, (gpointer)fw->feat_names[j]); 4993 } 4994 } 4995 } 4996 4997 names = g_list_sort(names, (GCompareFunc)strcmp); 4998 4999 qemu_printf("\nRecognized CPUID flags:\n"); 5000 listflags(names); 5001 qemu_printf("\n"); 5002 g_list_free(names); 5003 } 5004 5005 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 5006 { 5007 ObjectClass *oc = data; 5008 X86CPUClass *cc = X86_CPU_CLASS(oc); 5009 CpuDefinitionInfoList **cpu_list = user_data; 5010 CpuDefinitionInfo *info; 5011 5012 info = g_malloc0(sizeof(*info)); 5013 info->name = x86_cpu_class_get_model_name(cc); 5014 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5015 info->has_unavailable_features = true; 5016 info->q_typename = g_strdup(object_class_get_name(oc)); 5017 info->migration_safe = cc->migration_safe; 5018 info->has_migration_safe = true; 5019 info->q_static = cc->static_model; 5020 if (cc->model && cc->model->cpudef->deprecation_note) { 5021 info->deprecated = true; 5022 } else { 5023 info->deprecated = false; 5024 } 5025 /* 5026 * Old machine types won't report aliases, so that alias translation 5027 * doesn't break compatibility with previous QEMU versions. 5028 */ 5029 if (default_cpu_version != CPU_VERSION_LEGACY) { 5030 info->alias_of = x86_cpu_class_get_alias_of(cc); 5031 info->has_alias_of = !!info->alias_of; 5032 } 5033 5034 QAPI_LIST_PREPEND(*cpu_list, info); 5035 } 5036 5037 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5038 { 5039 CpuDefinitionInfoList *cpu_list = NULL; 5040 GSList *list = get_sorted_cpu_model_list(); 5041 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5042 g_slist_free(list); 5043 return cpu_list; 5044 } 5045 5046 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5047 bool migratable_only) 5048 { 5049 FeatureWordInfo *wi = &feature_word_info[w]; 5050 uint64_t r = 0; 5051 5052 if (kvm_enabled()) { 5053 switch (wi->type) { 5054 case CPUID_FEATURE_WORD: 5055 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5056 wi->cpuid.ecx, 5057 wi->cpuid.reg); 5058 break; 5059 case MSR_FEATURE_WORD: 5060 r = kvm_arch_get_supported_msr_feature(kvm_state, 5061 wi->msr.index); 5062 break; 5063 } 5064 } else if (hvf_enabled()) { 5065 if (wi->type != CPUID_FEATURE_WORD) { 5066 return 0; 5067 } 5068 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5069 wi->cpuid.ecx, 5070 wi->cpuid.reg); 5071 } else if (tcg_enabled()) { 5072 r = wi->tcg_features; 5073 } else { 5074 return ~0; 5075 } 5076 #ifndef TARGET_X86_64 5077 if (w == FEAT_8000_0001_EDX) { 5078 r &= ~CPUID_EXT2_LM; 5079 } 5080 #endif 5081 if (migratable_only) { 5082 r &= x86_cpu_get_migratable_flags(w); 5083 } 5084 return r; 5085 } 5086 5087 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5088 { 5089 PropValue *pv; 5090 for (pv = props; pv->prop; pv++) { 5091 if (!pv->value) { 5092 continue; 5093 } 5094 object_property_parse(OBJECT(cpu), pv->prop, pv->value, 5095 &error_abort); 5096 } 5097 } 5098 5099 /* Apply properties for the CPU model version specified in model */ 5100 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5101 { 5102 const X86CPUVersionDefinition *vdef; 5103 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5104 5105 if (version == CPU_VERSION_LEGACY) { 5106 return; 5107 } 5108 5109 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5110 PropValue *p; 5111 5112 for (p = vdef->props; p && p->prop; p++) { 5113 object_property_parse(OBJECT(cpu), p->prop, p->value, 5114 &error_abort); 5115 } 5116 5117 if (vdef->version == version) { 5118 break; 5119 } 5120 } 5121 5122 /* 5123 * If we reached the end of the list, version number was invalid 5124 */ 5125 assert(vdef->version == version); 5126 } 5127 5128 /* Load data from X86CPUDefinition into a X86CPU object 5129 */ 5130 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5131 { 5132 X86CPUDefinition *def = model->cpudef; 5133 CPUX86State *env = &cpu->env; 5134 const char *vendor; 5135 char host_vendor[CPUID_VENDOR_SZ + 1]; 5136 FeatureWord w; 5137 5138 /*NOTE: any property set by this function should be returned by 5139 * x86_cpu_static_props(), so static expansion of 5140 * query-cpu-model-expansion is always complete. 5141 */ 5142 5143 /* CPU models only set _minimum_ values for level/xlevel: */ 5144 object_property_set_uint(OBJECT(cpu), "min-level", def->level, 5145 &error_abort); 5146 object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel, 5147 &error_abort); 5148 5149 object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort); 5150 object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort); 5151 object_property_set_int(OBJECT(cpu), "stepping", def->stepping, 5152 &error_abort); 5153 object_property_set_str(OBJECT(cpu), "model-id", def->model_id, 5154 &error_abort); 5155 for (w = 0; w < FEATURE_WORDS; w++) { 5156 env->features[w] = def->features[w]; 5157 } 5158 5159 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5160 cpu->legacy_cache = !def->cache_info; 5161 5162 /* Special cases not set in the X86CPUDefinition structs: */ 5163 /* TODO: in-kernel irqchip for hvf */ 5164 if (kvm_enabled()) { 5165 if (!kvm_irqchip_in_kernel()) { 5166 x86_cpu_change_kvm_default("x2apic", "off"); 5167 } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { 5168 x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); 5169 } 5170 5171 x86_cpu_apply_props(cpu, kvm_default_props); 5172 } else if (tcg_enabled()) { 5173 x86_cpu_apply_props(cpu, tcg_default_props); 5174 } 5175 5176 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5177 5178 /* sysenter isn't supported in compatibility mode on AMD, 5179 * syscall isn't supported in compatibility mode on Intel. 5180 * Normally we advertise the actual CPU vendor, but you can 5181 * override this using the 'vendor' property if you want to use 5182 * KVM's sysenter/syscall emulation in compatibility mode and 5183 * when doing cross vendor migration 5184 */ 5185 vendor = def->vendor; 5186 if (accel_uses_host_cpuid()) { 5187 uint32_t ebx = 0, ecx = 0, edx = 0; 5188 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5189 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5190 vendor = host_vendor; 5191 } 5192 5193 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 5194 5195 x86_cpu_apply_version_props(cpu, model); 5196 5197 /* 5198 * Properties in versioned CPU model are not user specified features. 5199 * We can simply clear env->user_features here since it will be filled later 5200 * in x86_cpu_expand_features() based on plus_features and minus_features. 5201 */ 5202 memset(&env->user_features, 0, sizeof(env->user_features)); 5203 } 5204 5205 #ifndef CONFIG_USER_ONLY 5206 /* Return a QDict containing keys for all properties that can be included 5207 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5208 * must be included in the dictionary. 5209 */ 5210 static QDict *x86_cpu_static_props(void) 5211 { 5212 FeatureWord w; 5213 int i; 5214 static const char *props[] = { 5215 "min-level", 5216 "min-xlevel", 5217 "family", 5218 "model", 5219 "stepping", 5220 "model-id", 5221 "vendor", 5222 "lmce", 5223 NULL, 5224 }; 5225 static QDict *d; 5226 5227 if (d) { 5228 return d; 5229 } 5230 5231 d = qdict_new(); 5232 for (i = 0; props[i]; i++) { 5233 qdict_put_null(d, props[i]); 5234 } 5235 5236 for (w = 0; w < FEATURE_WORDS; w++) { 5237 FeatureWordInfo *fi = &feature_word_info[w]; 5238 int bit; 5239 for (bit = 0; bit < 64; bit++) { 5240 if (!fi->feat_names[bit]) { 5241 continue; 5242 } 5243 qdict_put_null(d, fi->feat_names[bit]); 5244 } 5245 } 5246 5247 return d; 5248 } 5249 5250 /* Add an entry to @props dict, with the value for property. */ 5251 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5252 { 5253 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5254 &error_abort); 5255 5256 qdict_put_obj(props, prop, value); 5257 } 5258 5259 /* Convert CPU model data from X86CPU object to a property dictionary 5260 * that can recreate exactly the same CPU model. 5261 */ 5262 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5263 { 5264 QDict *sprops = x86_cpu_static_props(); 5265 const QDictEntry *e; 5266 5267 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5268 const char *prop = qdict_entry_key(e); 5269 x86_cpu_expand_prop(cpu, props, prop); 5270 } 5271 } 5272 5273 /* Convert CPU model data from X86CPU object to a property dictionary 5274 * that can recreate exactly the same CPU model, including every 5275 * writeable QOM property. 5276 */ 5277 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5278 { 5279 ObjectPropertyIterator iter; 5280 ObjectProperty *prop; 5281 5282 object_property_iter_init(&iter, OBJECT(cpu)); 5283 while ((prop = object_property_iter_next(&iter))) { 5284 /* skip read-only or write-only properties */ 5285 if (!prop->get || !prop->set) { 5286 continue; 5287 } 5288 5289 /* "hotplugged" is the only property that is configurable 5290 * on the command-line but will be set differently on CPUs 5291 * created using "-cpu ... -smp ..." and by CPUs created 5292 * on the fly by x86_cpu_from_model() for querying. Skip it. 5293 */ 5294 if (!strcmp(prop->name, "hotplugged")) { 5295 continue; 5296 } 5297 x86_cpu_expand_prop(cpu, props, prop->name); 5298 } 5299 } 5300 5301 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5302 { 5303 const QDictEntry *prop; 5304 5305 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5306 if (!object_property_set_qobject(obj, qdict_entry_key(prop), 5307 qdict_entry_value(prop), errp)) { 5308 break; 5309 } 5310 } 5311 } 5312 5313 /* Create X86CPU object according to model+props specification */ 5314 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5315 { 5316 X86CPU *xc = NULL; 5317 X86CPUClass *xcc; 5318 Error *err = NULL; 5319 5320 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5321 if (xcc == NULL) { 5322 error_setg(&err, "CPU model '%s' not found", model); 5323 goto out; 5324 } 5325 5326 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5327 if (props) { 5328 object_apply_props(OBJECT(xc), props, &err); 5329 if (err) { 5330 goto out; 5331 } 5332 } 5333 5334 x86_cpu_expand_features(xc, &err); 5335 if (err) { 5336 goto out; 5337 } 5338 5339 out: 5340 if (err) { 5341 error_propagate(errp, err); 5342 object_unref(OBJECT(xc)); 5343 xc = NULL; 5344 } 5345 return xc; 5346 } 5347 5348 CpuModelExpansionInfo * 5349 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5350 CpuModelInfo *model, 5351 Error **errp) 5352 { 5353 X86CPU *xc = NULL; 5354 Error *err = NULL; 5355 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5356 QDict *props = NULL; 5357 const char *base_name; 5358 5359 xc = x86_cpu_from_model(model->name, 5360 model->has_props ? 5361 qobject_to(QDict, model->props) : 5362 NULL, &err); 5363 if (err) { 5364 goto out; 5365 } 5366 5367 props = qdict_new(); 5368 ret->model = g_new0(CpuModelInfo, 1); 5369 ret->model->props = QOBJECT(props); 5370 ret->model->has_props = true; 5371 5372 switch (type) { 5373 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5374 /* Static expansion will be based on "base" only */ 5375 base_name = "base"; 5376 x86_cpu_to_dict(xc, props); 5377 break; 5378 case CPU_MODEL_EXPANSION_TYPE_FULL: 5379 /* As we don't return every single property, full expansion needs 5380 * to keep the original model name+props, and add extra 5381 * properties on top of that. 5382 */ 5383 base_name = model->name; 5384 x86_cpu_to_dict_full(xc, props); 5385 break; 5386 default: 5387 error_setg(&err, "Unsupported expansion type"); 5388 goto out; 5389 } 5390 5391 x86_cpu_to_dict(xc, props); 5392 5393 ret->model->name = g_strdup(base_name); 5394 5395 out: 5396 object_unref(OBJECT(xc)); 5397 if (err) { 5398 error_propagate(errp, err); 5399 qapi_free_CpuModelExpansionInfo(ret); 5400 ret = NULL; 5401 } 5402 return ret; 5403 } 5404 #endif /* !CONFIG_USER_ONLY */ 5405 5406 static gchar *x86_gdb_arch_name(CPUState *cs) 5407 { 5408 #ifdef TARGET_X86_64 5409 return g_strdup("i386:x86-64"); 5410 #else 5411 return g_strdup("i386"); 5412 #endif 5413 } 5414 5415 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5416 { 5417 X86CPUModel *model = data; 5418 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5419 CPUClass *cc = CPU_CLASS(oc); 5420 5421 xcc->model = model; 5422 xcc->migration_safe = true; 5423 cc->deprecation_note = model->cpudef->deprecation_note; 5424 } 5425 5426 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5427 { 5428 g_autofree char *typename = x86_cpu_type_name(name); 5429 TypeInfo ti = { 5430 .name = typename, 5431 .parent = TYPE_X86_CPU, 5432 .class_init = x86_cpu_cpudef_class_init, 5433 .class_data = model, 5434 }; 5435 5436 type_register(&ti); 5437 } 5438 5439 static void x86_register_cpudef_types(X86CPUDefinition *def) 5440 { 5441 X86CPUModel *m; 5442 const X86CPUVersionDefinition *vdef; 5443 5444 /* AMD aliases are handled at runtime based on CPUID vendor, so 5445 * they shouldn't be set on the CPU model table. 5446 */ 5447 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5448 /* catch mistakes instead of silently truncating model_id when too long */ 5449 assert(def->model_id && strlen(def->model_id) <= 48); 5450 5451 /* Unversioned model: */ 5452 m = g_new0(X86CPUModel, 1); 5453 m->cpudef = def; 5454 m->version = CPU_VERSION_AUTO; 5455 m->is_alias = true; 5456 x86_register_cpu_model_type(def->name, m); 5457 5458 /* Versioned models: */ 5459 5460 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5461 X86CPUModel *m = g_new0(X86CPUModel, 1); 5462 g_autofree char *name = 5463 x86_cpu_versioned_model_name(def, vdef->version); 5464 m->cpudef = def; 5465 m->version = vdef->version; 5466 m->note = vdef->note; 5467 x86_register_cpu_model_type(name, m); 5468 5469 if (vdef->alias) { 5470 X86CPUModel *am = g_new0(X86CPUModel, 1); 5471 am->cpudef = def; 5472 am->version = vdef->version; 5473 am->is_alias = true; 5474 x86_register_cpu_model_type(vdef->alias, am); 5475 } 5476 } 5477 5478 } 5479 5480 #if !defined(CONFIG_USER_ONLY) 5481 5482 void cpu_clear_apic_feature(CPUX86State *env) 5483 { 5484 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5485 } 5486 5487 #endif /* !CONFIG_USER_ONLY */ 5488 5489 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5490 uint32_t *eax, uint32_t *ebx, 5491 uint32_t *ecx, uint32_t *edx) 5492 { 5493 X86CPU *cpu = env_archcpu(env); 5494 CPUState *cs = env_cpu(env); 5495 uint32_t die_offset; 5496 uint32_t limit; 5497 uint32_t signature[3]; 5498 X86CPUTopoInfo topo_info; 5499 5500 topo_info.dies_per_pkg = env->nr_dies; 5501 topo_info.cores_per_die = cs->nr_cores; 5502 topo_info.threads_per_core = cs->nr_threads; 5503 5504 /* Calculate & apply limits for different index ranges */ 5505 if (index >= 0xC0000000) { 5506 limit = env->cpuid_xlevel2; 5507 } else if (index >= 0x80000000) { 5508 limit = env->cpuid_xlevel; 5509 } else if (index >= 0x40000000) { 5510 limit = 0x40000001; 5511 } else { 5512 limit = env->cpuid_level; 5513 } 5514 5515 if (index > limit) { 5516 /* Intel documentation states that invalid EAX input will 5517 * return the same information as EAX=cpuid_level 5518 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5519 */ 5520 index = env->cpuid_level; 5521 } 5522 5523 switch(index) { 5524 case 0: 5525 *eax = env->cpuid_level; 5526 *ebx = env->cpuid_vendor1; 5527 *edx = env->cpuid_vendor2; 5528 *ecx = env->cpuid_vendor3; 5529 break; 5530 case 1: 5531 *eax = env->cpuid_version; 5532 *ebx = (cpu->apic_id << 24) | 5533 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5534 *ecx = env->features[FEAT_1_ECX]; 5535 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5536 *ecx |= CPUID_EXT_OSXSAVE; 5537 } 5538 *edx = env->features[FEAT_1_EDX]; 5539 if (cs->nr_cores * cs->nr_threads > 1) { 5540 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5541 *edx |= CPUID_HT; 5542 } 5543 if (!cpu->enable_pmu) { 5544 *ecx &= ~CPUID_EXT_PDCM; 5545 } 5546 break; 5547 case 2: 5548 /* cache info: needed for Pentium Pro compatibility */ 5549 if (cpu->cache_info_passthrough) { 5550 host_cpuid(index, 0, eax, ebx, ecx, edx); 5551 break; 5552 } 5553 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5554 *ebx = 0; 5555 if (!cpu->enable_l3_cache) { 5556 *ecx = 0; 5557 } else { 5558 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5559 } 5560 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5561 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5562 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5563 break; 5564 case 4: 5565 /* cache info: needed for Core compatibility */ 5566 if (cpu->cache_info_passthrough) { 5567 host_cpuid(index, count, eax, ebx, ecx, edx); 5568 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ 5569 *eax &= ~0xFC000000; 5570 if ((*eax & 31) && cs->nr_cores > 1) { 5571 *eax |= (cs->nr_cores - 1) << 26; 5572 } 5573 } else { 5574 *eax = 0; 5575 switch (count) { 5576 case 0: /* L1 dcache info */ 5577 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5578 1, cs->nr_cores, 5579 eax, ebx, ecx, edx); 5580 break; 5581 case 1: /* L1 icache info */ 5582 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5583 1, cs->nr_cores, 5584 eax, ebx, ecx, edx); 5585 break; 5586 case 2: /* L2 cache info */ 5587 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5588 cs->nr_threads, cs->nr_cores, 5589 eax, ebx, ecx, edx); 5590 break; 5591 case 3: /* L3 cache info */ 5592 die_offset = apicid_die_offset(&topo_info); 5593 if (cpu->enable_l3_cache) { 5594 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5595 (1 << die_offset), cs->nr_cores, 5596 eax, ebx, ecx, edx); 5597 break; 5598 } 5599 /* fall through */ 5600 default: /* end of info */ 5601 *eax = *ebx = *ecx = *edx = 0; 5602 break; 5603 } 5604 } 5605 break; 5606 case 5: 5607 /* MONITOR/MWAIT Leaf */ 5608 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5609 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5610 *ecx = cpu->mwait.ecx; /* flags */ 5611 *edx = cpu->mwait.edx; /* mwait substates */ 5612 break; 5613 case 6: 5614 /* Thermal and Power Leaf */ 5615 *eax = env->features[FEAT_6_EAX]; 5616 *ebx = 0; 5617 *ecx = 0; 5618 *edx = 0; 5619 break; 5620 case 7: 5621 /* Structured Extended Feature Flags Enumeration Leaf */ 5622 if (count == 0) { 5623 /* Maximum ECX value for sub-leaves */ 5624 *eax = env->cpuid_level_func7; 5625 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5626 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5627 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5628 *ecx |= CPUID_7_0_ECX_OSPKE; 5629 } 5630 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5631 } else if (count == 1) { 5632 *eax = env->features[FEAT_7_1_EAX]; 5633 *ebx = 0; 5634 *ecx = 0; 5635 *edx = 0; 5636 } else { 5637 *eax = 0; 5638 *ebx = 0; 5639 *ecx = 0; 5640 *edx = 0; 5641 } 5642 break; 5643 case 9: 5644 /* Direct Cache Access Information Leaf */ 5645 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5646 *ebx = 0; 5647 *ecx = 0; 5648 *edx = 0; 5649 break; 5650 case 0xA: 5651 /* Architectural Performance Monitoring Leaf */ 5652 if (kvm_enabled() && cpu->enable_pmu) { 5653 KVMState *s = cs->kvm_state; 5654 5655 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5656 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5657 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5658 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5659 } else if (hvf_enabled() && cpu->enable_pmu) { 5660 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5661 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5662 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5663 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5664 } else { 5665 *eax = 0; 5666 *ebx = 0; 5667 *ecx = 0; 5668 *edx = 0; 5669 } 5670 break; 5671 case 0xB: 5672 /* Extended Topology Enumeration Leaf */ 5673 if (!cpu->enable_cpuid_0xb) { 5674 *eax = *ebx = *ecx = *edx = 0; 5675 break; 5676 } 5677 5678 *ecx = count & 0xff; 5679 *edx = cpu->apic_id; 5680 5681 switch (count) { 5682 case 0: 5683 *eax = apicid_core_offset(&topo_info); 5684 *ebx = cs->nr_threads; 5685 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5686 break; 5687 case 1: 5688 *eax = apicid_pkg_offset(&topo_info); 5689 *ebx = cs->nr_cores * cs->nr_threads; 5690 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5691 break; 5692 default: 5693 *eax = 0; 5694 *ebx = 0; 5695 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5696 } 5697 5698 assert(!(*eax & ~0x1f)); 5699 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5700 break; 5701 case 0x1F: 5702 /* V2 Extended Topology Enumeration Leaf */ 5703 if (env->nr_dies < 2) { 5704 *eax = *ebx = *ecx = *edx = 0; 5705 break; 5706 } 5707 5708 *ecx = count & 0xff; 5709 *edx = cpu->apic_id; 5710 switch (count) { 5711 case 0: 5712 *eax = apicid_core_offset(&topo_info); 5713 *ebx = cs->nr_threads; 5714 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5715 break; 5716 case 1: 5717 *eax = apicid_die_offset(&topo_info); 5718 *ebx = cs->nr_cores * cs->nr_threads; 5719 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5720 break; 5721 case 2: 5722 *eax = apicid_pkg_offset(&topo_info); 5723 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5724 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5725 break; 5726 default: 5727 *eax = 0; 5728 *ebx = 0; 5729 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5730 } 5731 assert(!(*eax & ~0x1f)); 5732 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5733 break; 5734 case 0xD: { 5735 /* Processor Extended State */ 5736 *eax = 0; 5737 *ebx = 0; 5738 *ecx = 0; 5739 *edx = 0; 5740 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5741 break; 5742 } 5743 5744 if (count == 0) { 5745 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5746 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5747 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5748 /* 5749 * The initial value of xcr0 and ebx == 0, On host without kvm 5750 * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 5751 * even through guest update xcr0, this will crash some legacy guest 5752 * (e.g., CentOS 6), So set ebx == ecx to workaroud it. 5753 */ 5754 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5755 } else if (count == 1) { 5756 *eax = env->features[FEAT_XSAVE]; 5757 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5758 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5759 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5760 *eax = esa->size; 5761 *ebx = esa->offset; 5762 } 5763 } 5764 break; 5765 } 5766 case 0x14: { 5767 /* Intel Processor Trace Enumeration */ 5768 *eax = 0; 5769 *ebx = 0; 5770 *ecx = 0; 5771 *edx = 0; 5772 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5773 !kvm_enabled()) { 5774 break; 5775 } 5776 5777 if (count == 0) { 5778 *eax = INTEL_PT_MAX_SUBLEAF; 5779 *ebx = INTEL_PT_MINIMAL_EBX; 5780 *ecx = INTEL_PT_MINIMAL_ECX; 5781 if (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP) { 5782 *ecx |= CPUID_14_0_ECX_LIP; 5783 } 5784 } else if (count == 1) { 5785 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5786 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5787 } 5788 break; 5789 } 5790 case 0x40000000: 5791 /* 5792 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5793 * set here, but we restrict to TCG none the less. 5794 */ 5795 if (tcg_enabled() && cpu->expose_tcg) { 5796 memcpy(signature, "TCGTCGTCGTCG", 12); 5797 *eax = 0x40000001; 5798 *ebx = signature[0]; 5799 *ecx = signature[1]; 5800 *edx = signature[2]; 5801 } else { 5802 *eax = 0; 5803 *ebx = 0; 5804 *ecx = 0; 5805 *edx = 0; 5806 } 5807 break; 5808 case 0x40000001: 5809 *eax = 0; 5810 *ebx = 0; 5811 *ecx = 0; 5812 *edx = 0; 5813 break; 5814 case 0x80000000: 5815 *eax = env->cpuid_xlevel; 5816 *ebx = env->cpuid_vendor1; 5817 *edx = env->cpuid_vendor2; 5818 *ecx = env->cpuid_vendor3; 5819 break; 5820 case 0x80000001: 5821 *eax = env->cpuid_version; 5822 *ebx = 0; 5823 *ecx = env->features[FEAT_8000_0001_ECX]; 5824 *edx = env->features[FEAT_8000_0001_EDX]; 5825 5826 /* The Linux kernel checks for the CMPLegacy bit and 5827 * discards multiple thread information if it is set. 5828 * So don't set it here for Intel to make Linux guests happy. 5829 */ 5830 if (cs->nr_cores * cs->nr_threads > 1) { 5831 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5832 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5833 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5834 *ecx |= 1 << 1; /* CmpLegacy bit */ 5835 } 5836 } 5837 break; 5838 case 0x80000002: 5839 case 0x80000003: 5840 case 0x80000004: 5841 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5842 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5843 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5844 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5845 break; 5846 case 0x80000005: 5847 /* cache info (L1 cache) */ 5848 if (cpu->cache_info_passthrough) { 5849 host_cpuid(index, 0, eax, ebx, ecx, edx); 5850 break; 5851 } 5852 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5853 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5854 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5855 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5856 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5857 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5858 break; 5859 case 0x80000006: 5860 /* cache info (L2 cache) */ 5861 if (cpu->cache_info_passthrough) { 5862 host_cpuid(index, 0, eax, ebx, ecx, edx); 5863 break; 5864 } 5865 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5866 (L2_DTLB_2M_ENTRIES << 16) | 5867 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5868 (L2_ITLB_2M_ENTRIES); 5869 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5870 (L2_DTLB_4K_ENTRIES << 16) | 5871 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5872 (L2_ITLB_4K_ENTRIES); 5873 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5874 cpu->enable_l3_cache ? 5875 env->cache_info_amd.l3_cache : NULL, 5876 ecx, edx); 5877 break; 5878 case 0x80000007: 5879 *eax = 0; 5880 *ebx = 0; 5881 *ecx = 0; 5882 *edx = env->features[FEAT_8000_0007_EDX]; 5883 break; 5884 case 0x80000008: 5885 /* virtual & phys address size in low 2 bytes. */ 5886 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5887 /* 64 bit processor */ 5888 *eax = cpu->phys_bits; /* configurable physical bits */ 5889 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5890 *eax |= 0x00003900; /* 57 bits virtual */ 5891 } else { 5892 *eax |= 0x00003000; /* 48 bits virtual */ 5893 } 5894 } else { 5895 *eax = cpu->phys_bits; 5896 } 5897 *ebx = env->features[FEAT_8000_0008_EBX]; 5898 if (cs->nr_cores * cs->nr_threads > 1) { 5899 /* 5900 * Bits 15:12 is "The number of bits in the initial 5901 * Core::X86::Apic::ApicId[ApicId] value that indicate 5902 * thread ID within a package". 5903 * Bits 7:0 is "The number of threads in the package is NC+1" 5904 */ 5905 *ecx = (apicid_pkg_offset(&topo_info) << 12) | 5906 ((cs->nr_cores * cs->nr_threads) - 1); 5907 } else { 5908 *ecx = 0; 5909 } 5910 *edx = 0; 5911 break; 5912 case 0x8000000A: 5913 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5914 *eax = 0x00000001; /* SVM Revision */ 5915 *ebx = 0x00000010; /* nr of ASIDs */ 5916 *ecx = 0; 5917 *edx = env->features[FEAT_SVM]; /* optional features */ 5918 } else { 5919 *eax = 0; 5920 *ebx = 0; 5921 *ecx = 0; 5922 *edx = 0; 5923 } 5924 break; 5925 case 0x8000001D: 5926 *eax = 0; 5927 if (cpu->cache_info_passthrough) { 5928 host_cpuid(index, count, eax, ebx, ecx, edx); 5929 break; 5930 } 5931 switch (count) { 5932 case 0: /* L1 dcache info */ 5933 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5934 &topo_info, eax, ebx, ecx, edx); 5935 break; 5936 case 1: /* L1 icache info */ 5937 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5938 &topo_info, eax, ebx, ecx, edx); 5939 break; 5940 case 2: /* L2 cache info */ 5941 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5942 &topo_info, eax, ebx, ecx, edx); 5943 break; 5944 case 3: /* L3 cache info */ 5945 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5946 &topo_info, eax, ebx, ecx, edx); 5947 break; 5948 default: /* end of info */ 5949 *eax = *ebx = *ecx = *edx = 0; 5950 break; 5951 } 5952 break; 5953 case 0x8000001E: 5954 if (cpu->core_id <= 255) { 5955 encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx); 5956 } else { 5957 *eax = 0; 5958 *ebx = 0; 5959 *ecx = 0; 5960 *edx = 0; 5961 } 5962 break; 5963 case 0xC0000000: 5964 *eax = env->cpuid_xlevel2; 5965 *ebx = 0; 5966 *ecx = 0; 5967 *edx = 0; 5968 break; 5969 case 0xC0000001: 5970 /* Support for VIA CPU's CPUID instruction */ 5971 *eax = env->cpuid_version; 5972 *ebx = 0; 5973 *ecx = 0; 5974 *edx = env->features[FEAT_C000_0001_EDX]; 5975 break; 5976 case 0xC0000002: 5977 case 0xC0000003: 5978 case 0xC0000004: 5979 /* Reserved for the future, and now filled with zero */ 5980 *eax = 0; 5981 *ebx = 0; 5982 *ecx = 0; 5983 *edx = 0; 5984 break; 5985 case 0x8000001F: 5986 *eax = sev_enabled() ? 0x2 : 0; 5987 *eax |= sev_es_enabled() ? 0x8 : 0; 5988 *ebx = sev_get_cbit_position(); 5989 *ebx |= sev_get_reduced_phys_bits() << 6; 5990 *ecx = 0; 5991 *edx = 0; 5992 break; 5993 default: 5994 /* reserved values: zero */ 5995 *eax = 0; 5996 *ebx = 0; 5997 *ecx = 0; 5998 *edx = 0; 5999 break; 6000 } 6001 } 6002 6003 static void x86_cpu_reset(DeviceState *dev) 6004 { 6005 CPUState *s = CPU(dev); 6006 X86CPU *cpu = X86_CPU(s); 6007 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 6008 CPUX86State *env = &cpu->env; 6009 target_ulong cr4; 6010 uint64_t xcr0; 6011 int i; 6012 6013 xcc->parent_reset(dev); 6014 6015 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 6016 6017 env->old_exception = -1; 6018 6019 /* init to reset state */ 6020 6021 env->hflags2 |= HF2_GIF_MASK; 6022 env->hflags &= ~HF_GUEST_MASK; 6023 6024 cpu_x86_update_cr0(env, 0x60000010); 6025 env->a20_mask = ~0x0; 6026 env->smbase = 0x30000; 6027 env->msr_smi_count = 0; 6028 6029 env->idt.limit = 0xffff; 6030 env->gdt.limit = 0xffff; 6031 env->ldt.limit = 0xffff; 6032 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6033 env->tr.limit = 0xffff; 6034 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6035 6036 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6037 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6038 DESC_R_MASK | DESC_A_MASK); 6039 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6040 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6041 DESC_A_MASK); 6042 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6043 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6044 DESC_A_MASK); 6045 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6046 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6047 DESC_A_MASK); 6048 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6049 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6050 DESC_A_MASK); 6051 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6052 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6053 DESC_A_MASK); 6054 6055 env->eip = 0xfff0; 6056 env->regs[R_EDX] = env->cpuid_version; 6057 6058 env->eflags = 0x2; 6059 6060 /* FPU init */ 6061 for (i = 0; i < 8; i++) { 6062 env->fptags[i] = 1; 6063 } 6064 cpu_set_fpuc(env, 0x37f); 6065 6066 env->mxcsr = 0x1f80; 6067 /* All units are in INIT state. */ 6068 env->xstate_bv = 0; 6069 6070 env->pat = 0x0007040600070406ULL; 6071 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6072 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6073 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6074 } 6075 6076 memset(env->dr, 0, sizeof(env->dr)); 6077 env->dr[6] = DR6_FIXED_1; 6078 env->dr[7] = DR7_FIXED_1; 6079 cpu_breakpoint_remove_all(s, BP_CPU); 6080 cpu_watchpoint_remove_all(s, BP_CPU); 6081 6082 cr4 = 0; 6083 xcr0 = XSTATE_FP_MASK; 6084 6085 #ifdef CONFIG_USER_ONLY 6086 /* Enable all the features for user-mode. */ 6087 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6088 xcr0 |= XSTATE_SSE_MASK; 6089 } 6090 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6091 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6092 if (env->features[esa->feature] & esa->bits) { 6093 xcr0 |= 1ull << i; 6094 } 6095 } 6096 6097 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6098 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6099 } 6100 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6101 cr4 |= CR4_FSGSBASE_MASK; 6102 } 6103 #endif 6104 6105 env->xcr0 = xcr0; 6106 cpu_x86_update_cr4(env, cr4); 6107 6108 /* 6109 * SDM 11.11.5 requires: 6110 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6111 * - IA32_MTRR_PHYSMASKn.V = 0 6112 * All other bits are undefined. For simplification, zero it all. 6113 */ 6114 env->mtrr_deftype = 0; 6115 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6116 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6117 6118 env->interrupt_injected = -1; 6119 env->exception_nr = -1; 6120 env->exception_pending = 0; 6121 env->exception_injected = 0; 6122 env->exception_has_payload = false; 6123 env->exception_payload = 0; 6124 env->nmi_injected = false; 6125 #if !defined(CONFIG_USER_ONLY) 6126 /* We hard-wire the BSP to the first CPU. */ 6127 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6128 6129 s->halted = !cpu_is_bsp(cpu); 6130 6131 if (kvm_enabled()) { 6132 kvm_arch_reset_vcpu(cpu); 6133 } 6134 #endif 6135 } 6136 6137 #ifndef CONFIG_USER_ONLY 6138 bool cpu_is_bsp(X86CPU *cpu) 6139 { 6140 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6141 } 6142 6143 /* TODO: remove me, when reset over QOM tree is implemented */ 6144 static void x86_cpu_machine_reset_cb(void *opaque) 6145 { 6146 X86CPU *cpu = opaque; 6147 cpu_reset(CPU(cpu)); 6148 } 6149 #endif 6150 6151 static void mce_init(X86CPU *cpu) 6152 { 6153 CPUX86State *cenv = &cpu->env; 6154 unsigned int bank; 6155 6156 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6157 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6158 (CPUID_MCE | CPUID_MCA)) { 6159 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6160 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6161 cenv->mcg_ctl = ~(uint64_t)0; 6162 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6163 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6164 } 6165 } 6166 } 6167 6168 #ifndef CONFIG_USER_ONLY 6169 APICCommonClass *apic_get_class(void) 6170 { 6171 const char *apic_type = "apic"; 6172 6173 /* TODO: in-kernel irqchip for hvf */ 6174 if (kvm_apic_in_kernel()) { 6175 apic_type = "kvm-apic"; 6176 } else if (xen_enabled()) { 6177 apic_type = "xen-apic"; 6178 } else if (whpx_apic_in_platform()) { 6179 apic_type = "whpx-apic"; 6180 } 6181 6182 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6183 } 6184 6185 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6186 { 6187 APICCommonState *apic; 6188 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6189 6190 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6191 6192 object_property_add_child(OBJECT(cpu), "lapic", 6193 OBJECT(cpu->apic_state)); 6194 object_unref(OBJECT(cpu->apic_state)); 6195 6196 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6197 /* TODO: convert to link<> */ 6198 apic = APIC_COMMON(cpu->apic_state); 6199 apic->cpu = cpu; 6200 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6201 } 6202 6203 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6204 { 6205 APICCommonState *apic; 6206 static bool apic_mmio_map_once; 6207 6208 if (cpu->apic_state == NULL) { 6209 return; 6210 } 6211 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6212 6213 /* Map APIC MMIO area */ 6214 apic = APIC_COMMON(cpu->apic_state); 6215 if (!apic_mmio_map_once) { 6216 memory_region_add_subregion_overlap(get_system_memory(), 6217 apic->apicbase & 6218 MSR_IA32_APICBASE_BASE, 6219 &apic->io_memory, 6220 0x1000); 6221 apic_mmio_map_once = true; 6222 } 6223 } 6224 6225 static void x86_cpu_machine_done(Notifier *n, void *unused) 6226 { 6227 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6228 MemoryRegion *smram = 6229 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6230 6231 if (smram) { 6232 cpu->smram = g_new(MemoryRegion, 1); 6233 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6234 smram, 0, 4 * GiB); 6235 memory_region_set_enabled(cpu->smram, true); 6236 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6237 } 6238 } 6239 #else 6240 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6241 { 6242 } 6243 #endif 6244 6245 /* Note: Only safe for use on x86(-64) hosts */ 6246 static uint32_t x86_host_phys_bits(void) 6247 { 6248 uint32_t eax; 6249 uint32_t host_phys_bits; 6250 6251 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6252 if (eax >= 0x80000008) { 6253 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6254 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6255 * at 23:16 that can specify a maximum physical address bits for 6256 * the guest that can override this value; but I've not seen 6257 * anything with that set. 6258 */ 6259 host_phys_bits = eax & 0xff; 6260 } else { 6261 /* It's an odd 64 bit machine that doesn't have the leaf for 6262 * physical address bits; fall back to 36 that's most older 6263 * Intel. 6264 */ 6265 host_phys_bits = 36; 6266 } 6267 6268 return host_phys_bits; 6269 } 6270 6271 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6272 { 6273 if (*min < value) { 6274 *min = value; 6275 } 6276 } 6277 6278 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6279 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6280 { 6281 CPUX86State *env = &cpu->env; 6282 FeatureWordInfo *fi = &feature_word_info[w]; 6283 uint32_t eax = fi->cpuid.eax; 6284 uint32_t region = eax & 0xF0000000; 6285 6286 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6287 if (!env->features[w]) { 6288 return; 6289 } 6290 6291 switch (region) { 6292 case 0x00000000: 6293 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6294 break; 6295 case 0x80000000: 6296 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6297 break; 6298 case 0xC0000000: 6299 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6300 break; 6301 } 6302 6303 if (eax == 7) { 6304 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6305 fi->cpuid.ecx); 6306 } 6307 } 6308 6309 /* Calculate XSAVE components based on the configured CPU feature flags */ 6310 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6311 { 6312 CPUX86State *env = &cpu->env; 6313 int i; 6314 uint64_t mask; 6315 6316 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6317 env->features[FEAT_XSAVE_COMP_LO] = 0; 6318 env->features[FEAT_XSAVE_COMP_HI] = 0; 6319 return; 6320 } 6321 6322 mask = 0; 6323 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6324 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6325 if (env->features[esa->feature] & esa->bits) { 6326 mask |= (1ULL << i); 6327 } 6328 } 6329 6330 env->features[FEAT_XSAVE_COMP_LO] = mask; 6331 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6332 } 6333 6334 /***** Steps involved on loading and filtering CPUID data 6335 * 6336 * When initializing and realizing a CPU object, the steps 6337 * involved in setting up CPUID data are: 6338 * 6339 * 1) Loading CPU model definition (X86CPUDefinition). This is 6340 * implemented by x86_cpu_load_model() and should be completely 6341 * transparent, as it is done automatically by instance_init. 6342 * No code should need to look at X86CPUDefinition structs 6343 * outside instance_init. 6344 * 6345 * 2) CPU expansion. This is done by realize before CPUID 6346 * filtering, and will make sure host/accelerator data is 6347 * loaded for CPU models that depend on host capabilities 6348 * (e.g. "host"). Done by x86_cpu_expand_features(). 6349 * 6350 * 3) CPUID filtering. This initializes extra data related to 6351 * CPUID, and checks if the host supports all capabilities 6352 * required by the CPU. Runnability of a CPU model is 6353 * determined at this step. Done by x86_cpu_filter_features(). 6354 * 6355 * Some operations don't require all steps to be performed. 6356 * More precisely: 6357 * 6358 * - CPU instance creation (instance_init) will run only CPU 6359 * model loading. CPU expansion can't run at instance_init-time 6360 * because host/accelerator data may be not available yet. 6361 * - CPU realization will perform both CPU model expansion and CPUID 6362 * filtering, and return an error in case one of them fails. 6363 * - query-cpu-definitions needs to run all 3 steps. It needs 6364 * to run CPUID filtering, as the 'unavailable-features' 6365 * field is set based on the filtering results. 6366 * - The query-cpu-model-expansion QMP command only needs to run 6367 * CPU model loading and CPU expansion. It should not filter 6368 * any CPUID data based on host capabilities. 6369 */ 6370 6371 /* Expand CPU configuration data, based on configured features 6372 * and host/accelerator capabilities when appropriate. 6373 */ 6374 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6375 { 6376 CPUX86State *env = &cpu->env; 6377 FeatureWord w; 6378 int i; 6379 GList *l; 6380 6381 for (l = plus_features; l; l = l->next) { 6382 const char *prop = l->data; 6383 if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) { 6384 return; 6385 } 6386 } 6387 6388 for (l = minus_features; l; l = l->next) { 6389 const char *prop = l->data; 6390 if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) { 6391 return; 6392 } 6393 } 6394 6395 /*TODO: Now cpu->max_features doesn't overwrite features 6396 * set using QOM properties, and we can convert 6397 * plus_features & minus_features to global properties 6398 * inside x86_cpu_parse_featurestr() too. 6399 */ 6400 if (cpu->max_features) { 6401 for (w = 0; w < FEATURE_WORDS; w++) { 6402 /* Override only features that weren't set explicitly 6403 * by the user. 6404 */ 6405 env->features[w] |= 6406 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6407 ~env->user_features[w] & 6408 ~feature_word_info[w].no_autoenable_flags; 6409 } 6410 } 6411 6412 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6413 FeatureDep *d = &feature_dependencies[i]; 6414 if (!(env->features[d->from.index] & d->from.mask)) { 6415 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6416 6417 /* Not an error unless the dependent feature was added explicitly. */ 6418 mark_unavailable_features(cpu, d->to.index, 6419 unavailable_features & env->user_features[d->to.index], 6420 "This feature depends on other features that were not requested"); 6421 6422 env->features[d->to.index] &= ~unavailable_features; 6423 } 6424 } 6425 6426 if (!kvm_enabled() || !cpu->expose_kvm) { 6427 env->features[FEAT_KVM] = 0; 6428 } 6429 6430 x86_cpu_enable_xsave_components(cpu); 6431 6432 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6433 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6434 if (cpu->full_cpuid_auto_level) { 6435 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6436 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6437 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6438 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6439 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6440 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6441 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6442 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6443 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6444 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6445 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6446 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6447 6448 /* Intel Processor Trace requires CPUID[0x14] */ 6449 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6450 if (cpu->intel_pt_auto_level) { 6451 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6452 } else if (cpu->env.cpuid_min_level < 0x14) { 6453 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6454 CPUID_7_0_EBX_INTEL_PT, 6455 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,min-level=0x14\""); 6456 } 6457 } 6458 6459 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6460 if (env->nr_dies > 1) { 6461 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6462 } 6463 6464 /* SVM requires CPUID[0x8000000A] */ 6465 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6466 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6467 } 6468 6469 /* SEV requires CPUID[0x8000001F] */ 6470 if (sev_enabled()) { 6471 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6472 } 6473 } 6474 6475 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6476 if (env->cpuid_level_func7 == UINT32_MAX) { 6477 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6478 } 6479 if (env->cpuid_level == UINT32_MAX) { 6480 env->cpuid_level = env->cpuid_min_level; 6481 } 6482 if (env->cpuid_xlevel == UINT32_MAX) { 6483 env->cpuid_xlevel = env->cpuid_min_xlevel; 6484 } 6485 if (env->cpuid_xlevel2 == UINT32_MAX) { 6486 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6487 } 6488 } 6489 6490 /* 6491 * Finishes initialization of CPUID data, filters CPU feature 6492 * words based on host availability of each feature. 6493 * 6494 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6495 */ 6496 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6497 { 6498 CPUX86State *env = &cpu->env; 6499 FeatureWord w; 6500 const char *prefix = NULL; 6501 6502 if (verbose) { 6503 prefix = accel_uses_host_cpuid() 6504 ? "host doesn't support requested feature" 6505 : "TCG doesn't support requested feature"; 6506 } 6507 6508 for (w = 0; w < FEATURE_WORDS; w++) { 6509 uint64_t host_feat = 6510 x86_cpu_get_supported_feature_word(w, false); 6511 uint64_t requested_features = env->features[w]; 6512 uint64_t unavailable_features = requested_features & ~host_feat; 6513 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6514 } 6515 6516 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6517 kvm_enabled()) { 6518 KVMState *s = CPU(cpu)->kvm_state; 6519 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6520 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6521 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6522 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6523 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6524 6525 if (!eax_0 || 6526 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6527 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6528 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6529 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6530 INTEL_PT_ADDR_RANGES_NUM) || 6531 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6532 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6533 ((ecx_0 & CPUID_14_0_ECX_LIP) != 6534 (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP))) { 6535 /* 6536 * Processor Trace capabilities aren't configurable, so if the 6537 * host can't emulate the capabilities we report on 6538 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6539 */ 6540 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6541 } 6542 } 6543 } 6544 6545 static void x86_cpu_hyperv_realize(X86CPU *cpu) 6546 { 6547 size_t len; 6548 6549 /* Hyper-V vendor id */ 6550 if (!cpu->hyperv_vendor) { 6551 memcpy(cpu->hyperv_vendor_id, "Microsoft Hv", 12); 6552 } else { 6553 len = strlen(cpu->hyperv_vendor); 6554 6555 if (len > 12) { 6556 warn_report("hv-vendor-id truncated to 12 characters"); 6557 len = 12; 6558 } 6559 memset(cpu->hyperv_vendor_id, 0, 12); 6560 memcpy(cpu->hyperv_vendor_id, cpu->hyperv_vendor, len); 6561 } 6562 6563 /* 'Hv#1' interface identification*/ 6564 cpu->hyperv_interface_id[0] = 0x31237648; 6565 cpu->hyperv_interface_id[1] = 0; 6566 cpu->hyperv_interface_id[2] = 0; 6567 cpu->hyperv_interface_id[3] = 0; 6568 6569 /* Hypervisor system identity */ 6570 cpu->hyperv_version_id[0] = 0x00001bbc; 6571 cpu->hyperv_version_id[1] = 0x00060001; 6572 6573 /* Hypervisor implementation limits */ 6574 cpu->hyperv_limits[0] = 64; 6575 cpu->hyperv_limits[1] = 0; 6576 cpu->hyperv_limits[2] = 0; 6577 } 6578 6579 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6580 { 6581 CPUState *cs = CPU(dev); 6582 X86CPU *cpu = X86_CPU(dev); 6583 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6584 CPUX86State *env = &cpu->env; 6585 Error *local_err = NULL; 6586 static bool ht_warned; 6587 6588 if (xcc->host_cpuid_required) { 6589 if (!accel_uses_host_cpuid()) { 6590 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6591 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6592 goto out; 6593 } 6594 } 6595 6596 if (cpu->max_features && accel_uses_host_cpuid()) { 6597 if (enable_cpu_pm) { 6598 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6599 &cpu->mwait.ecx, &cpu->mwait.edx); 6600 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6601 if (kvm_enabled() && kvm_has_waitpkg()) { 6602 env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG; 6603 } 6604 } 6605 if (kvm_enabled() && cpu->ucode_rev == 0) { 6606 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6607 MSR_IA32_UCODE_REV); 6608 } 6609 } 6610 6611 if (cpu->ucode_rev == 0) { 6612 /* The default is the same as KVM's. */ 6613 if (IS_AMD_CPU(env)) { 6614 cpu->ucode_rev = 0x01000065; 6615 } else { 6616 cpu->ucode_rev = 0x100000000ULL; 6617 } 6618 } 6619 6620 /* mwait extended info: needed for Core compatibility */ 6621 /* We always wake on interrupt even if host does not have the capability */ 6622 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6623 6624 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6625 error_setg(errp, "apic-id property was not initialized properly"); 6626 return; 6627 } 6628 6629 x86_cpu_expand_features(cpu, &local_err); 6630 if (local_err) { 6631 goto out; 6632 } 6633 6634 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6635 6636 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6637 error_setg(&local_err, 6638 accel_uses_host_cpuid() ? 6639 "Host doesn't support requested features" : 6640 "TCG doesn't support requested features"); 6641 goto out; 6642 } 6643 6644 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6645 * CPUID[1].EDX. 6646 */ 6647 if (IS_AMD_CPU(env)) { 6648 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6649 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6650 & CPUID_EXT2_AMD_ALIASES); 6651 } 6652 6653 /* For 64bit systems think about the number of physical bits to present. 6654 * ideally this should be the same as the host; anything other than matching 6655 * the host can cause incorrect guest behaviour. 6656 * QEMU used to pick the magic value of 40 bits that corresponds to 6657 * consumer AMD devices but nothing else. 6658 */ 6659 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6660 if (accel_uses_host_cpuid()) { 6661 uint32_t host_phys_bits = x86_host_phys_bits(); 6662 static bool warned; 6663 6664 /* Print a warning if the user set it to a value that's not the 6665 * host value. 6666 */ 6667 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6668 !warned) { 6669 warn_report("Host physical bits (%u)" 6670 " does not match phys-bits property (%u)", 6671 host_phys_bits, cpu->phys_bits); 6672 warned = true; 6673 } 6674 6675 if (cpu->host_phys_bits) { 6676 /* The user asked for us to use the host physical bits */ 6677 cpu->phys_bits = host_phys_bits; 6678 if (cpu->host_phys_bits_limit && 6679 cpu->phys_bits > cpu->host_phys_bits_limit) { 6680 cpu->phys_bits = cpu->host_phys_bits_limit; 6681 } 6682 } 6683 6684 if (cpu->phys_bits && 6685 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6686 cpu->phys_bits < 32)) { 6687 error_setg(errp, "phys-bits should be between 32 and %u " 6688 " (but is %u)", 6689 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6690 return; 6691 } 6692 } else { 6693 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6694 error_setg(errp, "TCG only supports phys-bits=%u", 6695 TCG_PHYS_ADDR_BITS); 6696 return; 6697 } 6698 } 6699 /* 0 means it was not explicitly set by the user (or by machine 6700 * compat_props or by the host code above). In this case, the default 6701 * is the value used by TCG (40). 6702 */ 6703 if (cpu->phys_bits == 0) { 6704 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6705 } 6706 } else { 6707 /* For 32 bit systems don't use the user set value, but keep 6708 * phys_bits consistent with what we tell the guest. 6709 */ 6710 if (cpu->phys_bits != 0) { 6711 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 6712 return; 6713 } 6714 6715 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 6716 cpu->phys_bits = 36; 6717 } else { 6718 cpu->phys_bits = 32; 6719 } 6720 } 6721 6722 /* Cache information initialization */ 6723 if (!cpu->legacy_cache) { 6724 if (!xcc->model || !xcc->model->cpudef->cache_info) { 6725 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6726 error_setg(errp, 6727 "CPU model '%s' doesn't support legacy-cache=off", name); 6728 return; 6729 } 6730 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = 6731 *xcc->model->cpudef->cache_info; 6732 } else { 6733 /* Build legacy cache information */ 6734 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; 6735 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; 6736 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; 6737 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; 6738 6739 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; 6740 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; 6741 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; 6742 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; 6743 6744 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; 6745 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; 6746 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; 6747 env->cache_info_amd.l3_cache = &legacy_l3_cache; 6748 } 6749 6750 /* Process Hyper-V enlightenments */ 6751 x86_cpu_hyperv_realize(cpu); 6752 6753 cpu_exec_realizefn(cs, &local_err); 6754 if (local_err != NULL) { 6755 error_propagate(errp, local_err); 6756 return; 6757 } 6758 6759 #ifndef CONFIG_USER_ONLY 6760 MachineState *ms = MACHINE(qdev_get_machine()); 6761 qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 6762 6763 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { 6764 x86_cpu_apic_create(cpu, &local_err); 6765 if (local_err != NULL) { 6766 goto out; 6767 } 6768 } 6769 #endif 6770 6771 mce_init(cpu); 6772 6773 #ifndef CONFIG_USER_ONLY 6774 if (tcg_enabled()) { 6775 cpu->cpu_as_mem = g_new(MemoryRegion, 1); 6776 cpu->cpu_as_root = g_new(MemoryRegion, 1); 6777 6778 /* Outer container... */ 6779 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 6780 memory_region_set_enabled(cpu->cpu_as_root, true); 6781 6782 /* ... with two regions inside: normal system memory with low 6783 * priority, and... 6784 */ 6785 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 6786 get_system_memory(), 0, ~0ull); 6787 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 6788 memory_region_set_enabled(cpu->cpu_as_mem, true); 6789 6790 cs->num_ases = 2; 6791 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); 6792 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); 6793 6794 /* ... SMRAM with higher priority, linked from /machine/smram. */ 6795 cpu->machine_done.notify = x86_cpu_machine_done; 6796 qemu_add_machine_init_done_notifier(&cpu->machine_done); 6797 } 6798 #endif 6799 6800 qemu_init_vcpu(cs); 6801 6802 /* 6803 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU 6804 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX 6805 * based on inputs (sockets,cores,threads), it is still better to give 6806 * users a warning. 6807 * 6808 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 6809 * cs->nr_threads hasn't be populated yet and the checking is incorrect. 6810 */ 6811 if (IS_AMD_CPU(env) && 6812 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && 6813 cs->nr_threads > 1 && !ht_warned) { 6814 warn_report("This family of AMD CPU doesn't support " 6815 "hyperthreading(%d)", 6816 cs->nr_threads); 6817 error_printf("Please configure -smp options properly" 6818 " or try enabling topoext feature.\n"); 6819 ht_warned = true; 6820 } 6821 6822 x86_cpu_apic_realize(cpu, &local_err); 6823 if (local_err != NULL) { 6824 goto out; 6825 } 6826 cpu_reset(cs); 6827 6828 xcc->parent_realize(dev, &local_err); 6829 6830 out: 6831 if (local_err != NULL) { 6832 error_propagate(errp, local_err); 6833 return; 6834 } 6835 } 6836 6837 static void x86_cpu_unrealizefn(DeviceState *dev) 6838 { 6839 X86CPU *cpu = X86_CPU(dev); 6840 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6841 6842 #ifndef CONFIG_USER_ONLY 6843 cpu_remove_sync(CPU(dev)); 6844 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 6845 #endif 6846 6847 if (cpu->apic_state) { 6848 object_unparent(OBJECT(cpu->apic_state)); 6849 cpu->apic_state = NULL; 6850 } 6851 6852 xcc->parent_unrealize(dev); 6853 } 6854 6855 typedef struct BitProperty { 6856 FeatureWord w; 6857 uint64_t mask; 6858 } BitProperty; 6859 6860 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 6861 void *opaque, Error **errp) 6862 { 6863 X86CPU *cpu = X86_CPU(obj); 6864 BitProperty *fp = opaque; 6865 uint64_t f = cpu->env.features[fp->w]; 6866 bool value = (f & fp->mask) == fp->mask; 6867 visit_type_bool(v, name, &value, errp); 6868 } 6869 6870 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 6871 void *opaque, Error **errp) 6872 { 6873 DeviceState *dev = DEVICE(obj); 6874 X86CPU *cpu = X86_CPU(obj); 6875 BitProperty *fp = opaque; 6876 bool value; 6877 6878 if (dev->realized) { 6879 qdev_prop_set_after_realize(dev, name, errp); 6880 return; 6881 } 6882 6883 if (!visit_type_bool(v, name, &value, errp)) { 6884 return; 6885 } 6886 6887 if (value) { 6888 cpu->env.features[fp->w] |= fp->mask; 6889 } else { 6890 cpu->env.features[fp->w] &= ~fp->mask; 6891 } 6892 cpu->env.user_features[fp->w] |= fp->mask; 6893 } 6894 6895 /* Register a boolean property to get/set a single bit in a uint32_t field. 6896 * 6897 * The same property name can be registered multiple times to make it affect 6898 * multiple bits in the same FeatureWord. In that case, the getter will return 6899 * true only if all bits are set. 6900 */ 6901 static void x86_cpu_register_bit_prop(X86CPUClass *xcc, 6902 const char *prop_name, 6903 FeatureWord w, 6904 int bitnr) 6905 { 6906 ObjectClass *oc = OBJECT_CLASS(xcc); 6907 BitProperty *fp; 6908 ObjectProperty *op; 6909 uint64_t mask = (1ULL << bitnr); 6910 6911 op = object_class_property_find(oc, prop_name); 6912 if (op) { 6913 fp = op->opaque; 6914 assert(fp->w == w); 6915 fp->mask |= mask; 6916 } else { 6917 fp = g_new0(BitProperty, 1); 6918 fp->w = w; 6919 fp->mask = mask; 6920 object_class_property_add(oc, prop_name, "bool", 6921 x86_cpu_get_bit_prop, 6922 x86_cpu_set_bit_prop, 6923 NULL, fp); 6924 } 6925 } 6926 6927 static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc, 6928 FeatureWord w, 6929 int bitnr) 6930 { 6931 FeatureWordInfo *fi = &feature_word_info[w]; 6932 const char *name = fi->feat_names[bitnr]; 6933 6934 if (!name) { 6935 return; 6936 } 6937 6938 /* Property names should use "-" instead of "_". 6939 * Old names containing underscores are registered as aliases 6940 * using object_property_add_alias() 6941 */ 6942 assert(!strchr(name, '_')); 6943 /* aliases don't use "|" delimiters anymore, they are registered 6944 * manually using object_property_add_alias() */ 6945 assert(!strchr(name, '|')); 6946 x86_cpu_register_bit_prop(xcc, name, w, bitnr); 6947 } 6948 6949 #if !defined(CONFIG_USER_ONLY) 6950 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 6951 { 6952 X86CPU *cpu = X86_CPU(cs); 6953 CPUX86State *env = &cpu->env; 6954 GuestPanicInformation *panic_info = NULL; 6955 6956 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 6957 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 6958 6959 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 6960 6961 assert(HV_CRASH_PARAMS >= 5); 6962 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 6963 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 6964 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 6965 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 6966 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 6967 } 6968 6969 return panic_info; 6970 } 6971 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 6972 const char *name, void *opaque, 6973 Error **errp) 6974 { 6975 CPUState *cs = CPU(obj); 6976 GuestPanicInformation *panic_info; 6977 6978 if (!cs->crash_occurred) { 6979 error_setg(errp, "No crash occured"); 6980 return; 6981 } 6982 6983 panic_info = x86_cpu_get_crash_info(cs); 6984 if (panic_info == NULL) { 6985 error_setg(errp, "No crash information"); 6986 return; 6987 } 6988 6989 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 6990 errp); 6991 qapi_free_GuestPanicInformation(panic_info); 6992 } 6993 #endif /* !CONFIG_USER_ONLY */ 6994 6995 static void x86_cpu_initfn(Object *obj) 6996 { 6997 X86CPU *cpu = X86_CPU(obj); 6998 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 6999 CPUX86State *env = &cpu->env; 7000 7001 env->nr_dies = 1; 7002 cpu_set_cpustate_pointers(cpu); 7003 7004 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 7005 x86_cpu_get_feature_words, 7006 NULL, NULL, (void *)env->features); 7007 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 7008 x86_cpu_get_feature_words, 7009 NULL, NULL, (void *)cpu->filtered_features); 7010 7011 object_property_add_alias(obj, "sse3", obj, "pni"); 7012 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq"); 7013 object_property_add_alias(obj, "sse4-1", obj, "sse4.1"); 7014 object_property_add_alias(obj, "sse4-2", obj, "sse4.2"); 7015 object_property_add_alias(obj, "xd", obj, "nx"); 7016 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt"); 7017 object_property_add_alias(obj, "i64", obj, "lm"); 7018 7019 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl"); 7020 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust"); 7021 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt"); 7022 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm"); 7023 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy"); 7024 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr"); 7025 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core"); 7026 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb"); 7027 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay"); 7028 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu"); 7029 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf"); 7030 object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int"); 7031 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 7032 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 7033 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 7034 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 7035 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 7036 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 7037 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 7038 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 7039 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 7040 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 7041 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 7042 7043 if (xcc->model) { 7044 x86_cpu_load_model(cpu, xcc->model); 7045 } 7046 } 7047 7048 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7049 { 7050 X86CPU *cpu = X86_CPU(cs); 7051 7052 return cpu->apic_id; 7053 } 7054 7055 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 7056 { 7057 X86CPU *cpu = X86_CPU(cs); 7058 7059 return cpu->env.cr[0] & CR0_PG_MASK; 7060 } 7061 7062 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 7063 { 7064 X86CPU *cpu = X86_CPU(cs); 7065 7066 cpu->env.eip = value; 7067 } 7068 7069 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 7070 { 7071 X86CPU *cpu = X86_CPU(cs); 7072 CPUX86State *env = &cpu->env; 7073 7074 #if !defined(CONFIG_USER_ONLY) 7075 if (interrupt_request & CPU_INTERRUPT_POLL) { 7076 return CPU_INTERRUPT_POLL; 7077 } 7078 #endif 7079 if (interrupt_request & CPU_INTERRUPT_SIPI) { 7080 return CPU_INTERRUPT_SIPI; 7081 } 7082 7083 if (env->hflags2 & HF2_GIF_MASK) { 7084 if ((interrupt_request & CPU_INTERRUPT_SMI) && 7085 !(env->hflags & HF_SMM_MASK)) { 7086 return CPU_INTERRUPT_SMI; 7087 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 7088 !(env->hflags2 & HF2_NMI_MASK)) { 7089 return CPU_INTERRUPT_NMI; 7090 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 7091 return CPU_INTERRUPT_MCE; 7092 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 7093 (((env->hflags2 & HF2_VINTR_MASK) && 7094 (env->hflags2 & HF2_HIF_MASK)) || 7095 (!(env->hflags2 & HF2_VINTR_MASK) && 7096 (env->eflags & IF_MASK && 7097 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 7098 return CPU_INTERRUPT_HARD; 7099 #if !defined(CONFIG_USER_ONLY) 7100 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 7101 (env->eflags & IF_MASK) && 7102 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 7103 return CPU_INTERRUPT_VIRQ; 7104 #endif 7105 } 7106 } 7107 7108 return 0; 7109 } 7110 7111 static bool x86_cpu_has_work(CPUState *cs) 7112 { 7113 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 7114 } 7115 7116 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 7117 { 7118 X86CPU *cpu = X86_CPU(cs); 7119 CPUX86State *env = &cpu->env; 7120 7121 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 7122 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 7123 : bfd_mach_i386_i8086); 7124 info->print_insn = print_insn_i386; 7125 7126 info->cap_arch = CS_ARCH_X86; 7127 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 7128 : env->hflags & HF_CS32_MASK ? CS_MODE_32 7129 : CS_MODE_16); 7130 info->cap_insn_unit = 1; 7131 info->cap_insn_split = 8; 7132 } 7133 7134 void x86_update_hflags(CPUX86State *env) 7135 { 7136 uint32_t hflags; 7137 #define HFLAG_COPY_MASK \ 7138 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 7139 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 7140 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 7141 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 7142 7143 hflags = env->hflags & HFLAG_COPY_MASK; 7144 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 7145 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 7146 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 7147 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 7148 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 7149 7150 if (env->cr[4] & CR4_OSFXSR_MASK) { 7151 hflags |= HF_OSFXSR_MASK; 7152 } 7153 7154 if (env->efer & MSR_EFER_LMA) { 7155 hflags |= HF_LMA_MASK; 7156 } 7157 7158 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 7159 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 7160 } else { 7161 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 7162 (DESC_B_SHIFT - HF_CS32_SHIFT); 7163 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 7164 (DESC_B_SHIFT - HF_SS32_SHIFT); 7165 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 7166 !(hflags & HF_CS32_MASK)) { 7167 hflags |= HF_ADDSEG_MASK; 7168 } else { 7169 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 7170 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 7171 } 7172 } 7173 env->hflags = hflags; 7174 } 7175 7176 static Property x86_cpu_properties[] = { 7177 #ifdef CONFIG_USER_ONLY 7178 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 7179 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 7180 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 7181 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 7182 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0), 7183 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 7184 #else 7185 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 7186 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 7187 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 7188 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1), 7189 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 7190 #endif 7191 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 7192 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 7193 7194 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, 7195 HYPERV_SPINLOCK_NEVER_NOTIFY), 7196 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features, 7197 HYPERV_FEAT_RELAXED, 0), 7198 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features, 7199 HYPERV_FEAT_VAPIC, 0), 7200 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features, 7201 HYPERV_FEAT_TIME, 0), 7202 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features, 7203 HYPERV_FEAT_CRASH, 0), 7204 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features, 7205 HYPERV_FEAT_RESET, 0), 7206 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features, 7207 HYPERV_FEAT_VPINDEX, 0), 7208 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features, 7209 HYPERV_FEAT_RUNTIME, 0), 7210 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features, 7211 HYPERV_FEAT_SYNIC, 0), 7212 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features, 7213 HYPERV_FEAT_STIMER, 0), 7214 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features, 7215 HYPERV_FEAT_FREQUENCIES, 0), 7216 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7217 HYPERV_FEAT_REENLIGHTENMENT, 0), 7218 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7219 HYPERV_FEAT_TLBFLUSH, 0), 7220 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7221 HYPERV_FEAT_EVMCS, 0), 7222 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7223 HYPERV_FEAT_IPI, 0), 7224 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7225 HYPERV_FEAT_STIMER_DIRECT, 0), 7226 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7227 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7228 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7229 7230 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7231 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7232 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7233 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7234 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7235 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7236 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7237 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7238 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7239 UINT32_MAX), 7240 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7241 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7242 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7243 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7244 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7245 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7246 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7247 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7248 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor), 7249 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7250 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7251 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7252 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7253 false), 7254 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7255 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7256 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7257 true), 7258 /* 7259 * lecacy_cache defaults to true unless the CPU model provides its 7260 * own cache information (see x86_cpu_load_def()). 7261 */ 7262 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7263 7264 /* 7265 * From "Requirements for Implementing the Microsoft 7266 * Hypervisor Interface": 7267 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7268 * 7269 * "Starting with Windows Server 2012 and Windows 8, if 7270 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7271 * the hypervisor imposes no specific limit to the number of VPs. 7272 * In this case, Windows Server 2012 guest VMs may use more than 7273 * 64 VPs, up to the maximum supported number of processors applicable 7274 * to the specific Windows version being used." 7275 */ 7276 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 7277 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 7278 false), 7279 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 7280 true), 7281 DEFINE_PROP_END_OF_LIST() 7282 }; 7283 7284 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 7285 { 7286 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7287 CPUClass *cc = CPU_CLASS(oc); 7288 DeviceClass *dc = DEVICE_CLASS(oc); 7289 FeatureWord w; 7290 7291 device_class_set_parent_realize(dc, x86_cpu_realizefn, 7292 &xcc->parent_realize); 7293 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 7294 &xcc->parent_unrealize); 7295 device_class_set_props(dc, x86_cpu_properties); 7296 7297 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset); 7298 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 7299 7300 cc->class_by_name = x86_cpu_class_by_name; 7301 cc->parse_features = x86_cpu_parse_featurestr; 7302 cc->has_work = x86_cpu_has_work; 7303 7304 #ifdef CONFIG_TCG 7305 tcg_cpu_common_class_init(cc); 7306 #endif /* CONFIG_TCG */ 7307 7308 cc->dump_state = x86_cpu_dump_state; 7309 cc->set_pc = x86_cpu_set_pc; 7310 cc->gdb_read_register = x86_cpu_gdb_read_register; 7311 cc->gdb_write_register = x86_cpu_gdb_write_register; 7312 cc->get_arch_id = x86_cpu_get_arch_id; 7313 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 7314 7315 #ifndef CONFIG_USER_ONLY 7316 cc->asidx_from_attrs = x86_asidx_from_attrs; 7317 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 7318 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; 7319 cc->get_crash_info = x86_cpu_get_crash_info; 7320 cc->write_elf64_note = x86_cpu_write_elf64_note; 7321 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 7322 cc->write_elf32_note = x86_cpu_write_elf32_note; 7323 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 7324 cc->vmsd = &vmstate_x86_cpu; 7325 #endif /* !CONFIG_USER_ONLY */ 7326 7327 cc->gdb_arch_name = x86_gdb_arch_name; 7328 #ifdef TARGET_X86_64 7329 cc->gdb_core_xml_file = "i386-64bit.xml"; 7330 cc->gdb_num_core_regs = 66; 7331 #else 7332 cc->gdb_core_xml_file = "i386-32bit.xml"; 7333 cc->gdb_num_core_regs = 50; 7334 #endif 7335 cc->disas_set_info = x86_disas_set_info; 7336 7337 dc->user_creatable = true; 7338 7339 object_class_property_add(oc, "family", "int", 7340 x86_cpuid_version_get_family, 7341 x86_cpuid_version_set_family, NULL, NULL); 7342 object_class_property_add(oc, "model", "int", 7343 x86_cpuid_version_get_model, 7344 x86_cpuid_version_set_model, NULL, NULL); 7345 object_class_property_add(oc, "stepping", "int", 7346 x86_cpuid_version_get_stepping, 7347 x86_cpuid_version_set_stepping, NULL, NULL); 7348 object_class_property_add_str(oc, "vendor", 7349 x86_cpuid_get_vendor, 7350 x86_cpuid_set_vendor); 7351 object_class_property_add_str(oc, "model-id", 7352 x86_cpuid_get_model_id, 7353 x86_cpuid_set_model_id); 7354 object_class_property_add(oc, "tsc-frequency", "int", 7355 x86_cpuid_get_tsc_freq, 7356 x86_cpuid_set_tsc_freq, NULL, NULL); 7357 /* 7358 * The "unavailable-features" property has the same semantics as 7359 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 7360 * QMP command: they list the features that would have prevented the 7361 * CPU from running if the "enforce" flag was set. 7362 */ 7363 object_class_property_add(oc, "unavailable-features", "strList", 7364 x86_cpu_get_unavailable_features, 7365 NULL, NULL, NULL); 7366 7367 #if !defined(CONFIG_USER_ONLY) 7368 object_class_property_add(oc, "crash-information", "GuestPanicInformation", 7369 x86_cpu_get_crash_info_qom, NULL, NULL, NULL); 7370 #endif 7371 7372 for (w = 0; w < FEATURE_WORDS; w++) { 7373 int bitnr; 7374 for (bitnr = 0; bitnr < 64; bitnr++) { 7375 x86_cpu_register_feature_bit_props(xcc, w, bitnr); 7376 } 7377 } 7378 } 7379 7380 static const TypeInfo x86_cpu_type_info = { 7381 .name = TYPE_X86_CPU, 7382 .parent = TYPE_CPU, 7383 .instance_size = sizeof(X86CPU), 7384 .instance_init = x86_cpu_initfn, 7385 .abstract = true, 7386 .class_size = sizeof(X86CPUClass), 7387 .class_init = x86_cpu_common_class_init, 7388 }; 7389 7390 7391 /* "base" CPU model, used by query-cpu-model-expansion */ 7392 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 7393 { 7394 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7395 7396 xcc->static_model = true; 7397 xcc->migration_safe = true; 7398 xcc->model_description = "base CPU model type with no features enabled"; 7399 xcc->ordering = 8; 7400 } 7401 7402 static const TypeInfo x86_base_cpu_type_info = { 7403 .name = X86_CPU_TYPE_NAME("base"), 7404 .parent = TYPE_X86_CPU, 7405 .class_init = x86_cpu_base_class_init, 7406 }; 7407 7408 static void x86_cpu_register_types(void) 7409 { 7410 int i; 7411 7412 type_register_static(&x86_cpu_type_info); 7413 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 7414 x86_register_cpudef_types(&builtin_x86_defs[i]); 7415 } 7416 type_register_static(&max_x86_cpu_type_info); 7417 type_register_static(&x86_base_cpu_type_info); 7418 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 7419 type_register_static(&host_x86_cpu_type_info); 7420 #endif 7421 } 7422 7423 type_init(x86_cpu_register_types) 7424