/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"

#include "cpu.h"
#include "tcg/tcg-cpu.h"
#include "tcg/helper-tcg.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "sysemu/whpx.h"
#include "kvm/kvm_i386.h"
#include "sev_i386.h"

#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#include "qapi/qapi-commands-machine-target.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif

#include "disas/capstone.h"

/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};
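
/*
 * Illustrative sketch only (a hypothetical helper, not used elsewhere in
 * this file): the table above is indexed directly by the CPUID[2]
 * descriptor byte, so decoding a descriptor that a guest reads back is a
 * bounds-checked array lookup.  A NULL return covers bytes this table does
 * not model (gaps and the sectored-cache descriptors noted above).
 */
static inline const struct CPUID2CacheDescriptorInfo *
cpuid2_cache_descriptor_info(uint8_t descriptor)
{
    if (descriptor >= ARRAY_SIZE(cpuid2_cache_descriptors) ||
        cpuid2_cache_descriptors[descriptor].size == 0) {
        return NULL;
    }
    return &cpuid2_cache_descriptors[descriptor];
}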

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF

/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}
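
/*
 * Illustrative sketch only (a hypothetical packer, not part of this file's
 * real leaf 2 handling, which lives with the other CPUID leaves later in
 * the file): CPUID[2] returns descriptor bytes packed four to a register.
 * Per the SDM conventions, the low byte of EAX is 0x01 (the leaf needs to
 * be executed once), a cleared bit 31 marks a register's descriptors as
 * valid, and 0x00 is the "null" descriptor.
 */
static inline uint32_t cpuid2_pack_descriptors(uint8_t d0, uint8_t d1,
                                               uint8_t d2, uint8_t d3)
{
    /* bit 31 is left clear: the four descriptor bytes below are valid */
    return (uint32_t)d0 | ((uint32_t)d1 << 8) |
           ((uint32_t)d2 << 16) | ((uint32_t)d3 << 24);
}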

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

#define CACHE_LEVEL(l)        (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)


/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)

/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}

/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
                                       X86CPUTopoInfo *topo_info,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_threads;
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_threads = topo_info->cores_per_die * topo_info->threads_per_core;
        *eax |= (l3_threads - 1) << 14;
    } else {
        *eax |= ((topo_info->threads_per_core - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode topology info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    X86CPUTopoIDs topo_ids;

    x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);

    *eax = cpu->apic_id;

    /*
     * CPUID_Fn8000001E_EBX [Core Identifiers] (CoreId)
     * Read-only. Reset: 0000_XXXXh.
     * See Core::X86::Cpuid::ExtApicId.
     * Core::X86::Cpuid::CoreId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits Description
     * 31:16 Reserved.
     * 15:8 ThreadsPerCore: threads per core. Read-only. Reset: XXh.
     *      The number of threads per core is ThreadsPerCore+1.
     *  7:0 CoreId: core ID. Read-only. Reset: XXh.
     *
     * NOTE: CoreId is already part of apic_id. Just use it. We can
     * use all the 8 bits to represent the core_id here.
     */
    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.core_id & 0xFF);

    /*
     * CPUID_Fn8000001E_ECX [Node Identifiers] (NodeId)
     * Read-only. Reset: 0000_0XXXh.
     * Core::X86::Cpuid::NodeId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits Description
     * 31:11 Reserved.
     * 10:8 NodesPerProcessor: Nodes per processor. Read-only. Reset: XXXb.
     *      ValidValues:
     *      Value     Description
     *      000b      1 node per processor.
     *      001b      2 nodes per processor.
     *      010b      Reserved.
     *      011b      4 nodes per processor.
     *      111b-100b Reserved.
     *  7:0 NodeId: Node ID. Read-only. Reset: XXh.
     *
     * NOTE: Hardware reserves 3 bits for number of nodes per processor.
     * But users can create more nodes than the actual hardware can
     * support. To generalize we can use all the upper 8 bits for nodes.
     * NodeId is a combination of node and socket_id which is already decoded
     * in apic_id. Just use it by shifting.
     */
    *ecx = ((topo_info->dies_per_pkg - 1) << 8) |
           ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF);

    *edx = 0;
}
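
/*
 * Worked example / illustrative sketch (a hypothetical helper, not used
 * below): decoding CPUID[4] EBX/ECX back into a cache size is the inverse
 * of encode_cache_cpuid4() above.  For a 32 KiB, 8-way cache with 64-byte
 * lines, 1 partition and 64 sets, encode_cache_cpuid4() produces
 * EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01c0003f and
 * ECX = 64 - 1 = 63, and the decoder below recovers
 * 8 * 1 * 64 * 64 = 32 KiB.
 */
static inline uint64_t cpuid4_cache_size(uint32_t ebx, uint32_t ecx)
{
    uint32_t line_size  = (ebx & 0xfff) + 1;         /* EBX bits 11:0  */
    uint32_t partitions = ((ebx >> 12) & 0x3ff) + 1; /* EBX bits 21:12 */
    uint32_t ways       = (ebx >> 22) + 1;           /* EBX bits 31:22 */
    uint32_t sets       = ecx + 1;

    return (uint64_t)ways * partitions * line_size * sets;
}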
424 * 425 * NOTE: Hardware reserves 3 bits for number of nodes per processor. 426 * But users can create more nodes than the actual hardware can 427 * support. To genaralize we can use all the upper 8 bits for nodes. 428 * NodeId is combination of node and socket_id which is already decoded 429 * in apic_id. Just use it by shifting. 430 */ 431 *ecx = ((topo_info->dies_per_pkg - 1) << 8) | 432 ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF); 433 434 *edx = 0; 435 } 436 437 /* 438 * Definitions of the hardcoded cache entries we expose: 439 * These are legacy cache values. If there is a need to change any 440 * of these values please use builtin_x86_defs 441 */ 442 443 /* L1 data cache: */ 444 static CPUCacheInfo legacy_l1d_cache = { 445 .type = DATA_CACHE, 446 .level = 1, 447 .size = 32 * KiB, 448 .self_init = 1, 449 .line_size = 64, 450 .associativity = 8, 451 .sets = 64, 452 .partitions = 1, 453 .no_invd_sharing = true, 454 }; 455 456 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 457 static CPUCacheInfo legacy_l1d_cache_amd = { 458 .type = DATA_CACHE, 459 .level = 1, 460 .size = 64 * KiB, 461 .self_init = 1, 462 .line_size = 64, 463 .associativity = 2, 464 .sets = 512, 465 .partitions = 1, 466 .lines_per_tag = 1, 467 .no_invd_sharing = true, 468 }; 469 470 /* L1 instruction cache: */ 471 static CPUCacheInfo legacy_l1i_cache = { 472 .type = INSTRUCTION_CACHE, 473 .level = 1, 474 .size = 32 * KiB, 475 .self_init = 1, 476 .line_size = 64, 477 .associativity = 8, 478 .sets = 64, 479 .partitions = 1, 480 .no_invd_sharing = true, 481 }; 482 483 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 484 static CPUCacheInfo legacy_l1i_cache_amd = { 485 .type = INSTRUCTION_CACHE, 486 .level = 1, 487 .size = 64 * KiB, 488 .self_init = 1, 489 .line_size = 64, 490 .associativity = 2, 491 .sets = 512, 492 .partitions = 1, 493 .lines_per_tag = 1, 494 .no_invd_sharing = true, 495 }; 496 497 /* Level 2 unified cache: */ 498 static CPUCacheInfo legacy_l2_cache = { 499 .type = UNIFIED_CACHE, 500 .level = 2, 501 .size = 4 * MiB, 502 .self_init = 1, 503 .line_size = 64, 504 .associativity = 16, 505 .sets = 4096, 506 .partitions = 1, 507 .no_invd_sharing = true, 508 }; 509 510 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 511 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 512 .type = UNIFIED_CACHE, 513 .level = 2, 514 .size = 2 * MiB, 515 .line_size = 64, 516 .associativity = 8, 517 }; 518 519 520 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 521 static CPUCacheInfo legacy_l2_cache_amd = { 522 .type = UNIFIED_CACHE, 523 .level = 2, 524 .size = 512 * KiB, 525 .line_size = 64, 526 .lines_per_tag = 1, 527 .associativity = 16, 528 .sets = 512, 529 .partitions = 1, 530 }; 531 532 /* Level 3 unified cache: */ 533 static CPUCacheInfo legacy_l3_cache = { 534 .type = UNIFIED_CACHE, 535 .level = 3, 536 .size = 16 * MiB, 537 .line_size = 64, 538 .associativity = 16, 539 .sets = 16384, 540 .partitions = 1, 541 .lines_per_tag = 1, 542 .self_init = true, 543 .inclusive = true, 544 .complex_indexing = true, 545 }; 546 547 /* TLB definitions: */ 548 549 #define L1_DTLB_2M_ASSOC 1 550 #define L1_DTLB_2M_ENTRIES 255 551 #define L1_DTLB_4K_ASSOC 1 552 #define L1_DTLB_4K_ENTRIES 255 553 554 #define L1_ITLB_2M_ASSOC 1 555 #define L1_ITLB_2M_ENTRIES 255 556 #define L1_ITLB_4K_ASSOC 1 557 #define L1_ITLB_4K_ENTRIES 255 558 559 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 560 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 561 #define L2_DTLB_4K_ASSOC 4 
562 #define L2_DTLB_4K_ENTRIES 512 563 564 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 565 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 566 #define L2_ITLB_4K_ASSOC 4 567 #define L2_ITLB_4K_ENTRIES 512 568 569 /* CPUID Leaf 0x14 constants: */ 570 #define INTEL_PT_MAX_SUBLEAF 0x1 571 /* 572 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 573 * MSR can be accessed; 574 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 575 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 576 * of Intel PT MSRs across warm reset; 577 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 578 */ 579 #define INTEL_PT_MINIMAL_EBX 0xf 580 /* 581 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 582 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 583 * accessed; 584 * bit[01]: ToPA tables can hold any number of output entries, up to the 585 * maximum allowed by the MaskOrTableOffset field of 586 * IA32_RTIT_OUTPUT_MASK_PTRS; 587 * bit[02]: Support Single-Range Output scheme; 588 */ 589 #define INTEL_PT_MINIMAL_ECX 0x7 590 /* generated packets which contain IP payloads have LIP values */ 591 #define INTEL_PT_IP_LIP (1 << 31) 592 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 593 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 594 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 595 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 596 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 597 598 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 599 uint32_t vendor2, uint32_t vendor3) 600 { 601 int i; 602 for (i = 0; i < 4; i++) { 603 dst[i] = vendor1 >> (8 * i); 604 dst[i + 4] = vendor2 >> (8 * i); 605 dst[i + 8] = vendor3 >> (8 * i); 606 } 607 dst[CPUID_VENDOR_SZ] = '\0'; 608 } 609 610 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 611 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 612 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 613 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 614 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 615 CPUID_PSE36 | CPUID_FXSR) 616 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 617 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 618 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 619 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 620 CPUID_PAE | CPUID_SEP | CPUID_APIC) 621 622 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 623 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 624 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 625 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 626 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 627 /* partly implemented: 628 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 629 /* missing: 630 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 631 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 632 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 633 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 634 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 635 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 636 CPUID_EXT_RDRAND) 637 /* missing: 638 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 639 CPUID_EXT_EST, 
CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 640 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 641 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 642 CPUID_EXT_F16C */ 643 644 #ifdef TARGET_X86_64 645 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 646 #else 647 #define TCG_EXT2_X86_64_FEATURES 0 648 #endif 649 650 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 651 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 652 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 653 TCG_EXT2_X86_64_FEATURES) 654 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 655 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 656 #define TCG_EXT4_FEATURES 0 657 #define TCG_SVM_FEATURES CPUID_SVM_NPT 658 #define TCG_KVM_FEATURES 0 659 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 660 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 661 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 662 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 663 CPUID_7_0_EBX_ERMS) 664 /* missing: 665 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 666 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 667 CPUID_7_0_EBX_RDSEED */ 668 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 669 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 670 CPUID_7_0_ECX_LA57) 671 #define TCG_7_0_EDX_FEATURES 0 672 #define TCG_7_1_EAX_FEATURES 0 673 #define TCG_APM_FEATURES 0 674 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 675 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 676 /* missing: 677 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 678 #define TCG_14_0_ECX_FEATURES 0 679 680 typedef enum FeatureWordType { 681 CPUID_FEATURE_WORD, 682 MSR_FEATURE_WORD, 683 } FeatureWordType; 684 685 typedef struct FeatureWordInfo { 686 FeatureWordType type; 687 /* feature flags names are taken from "Intel Processor Identification and 688 * the CPUID Instruction" and AMD's "CPUID Specification". 689 * In cases of disagreement between feature naming conventions, 690 * aliases may be added. 
691 */ 692 const char *feat_names[64]; 693 union { 694 /* If type==CPUID_FEATURE_WORD */ 695 struct { 696 uint32_t eax; /* Input EAX for CPUID */ 697 bool needs_ecx; /* CPUID instruction uses ECX as input */ 698 uint32_t ecx; /* Input ECX value for CPUID */ 699 int reg; /* output register (R_* constant) */ 700 } cpuid; 701 /* If type==MSR_FEATURE_WORD */ 702 struct { 703 uint32_t index; 704 } msr; 705 }; 706 uint64_t tcg_features; /* Feature flags supported by TCG */ 707 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ 708 uint64_t migratable_flags; /* Feature flags known to be migratable */ 709 /* Features that shouldn't be auto-enabled by "-cpu host" */ 710 uint64_t no_autoenable_flags; 711 } FeatureWordInfo; 712 713 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 714 [FEAT_1_EDX] = { 715 .type = CPUID_FEATURE_WORD, 716 .feat_names = { 717 "fpu", "vme", "de", "pse", 718 "tsc", "msr", "pae", "mce", 719 "cx8", "apic", NULL, "sep", 720 "mtrr", "pge", "mca", "cmov", 721 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 722 NULL, "ds" /* Intel dts */, "acpi", "mmx", 723 "fxsr", "sse", "sse2", "ss", 724 "ht" /* Intel htt */, "tm", "ia64", "pbe", 725 }, 726 .cpuid = {.eax = 1, .reg = R_EDX, }, 727 .tcg_features = TCG_FEATURES, 728 }, 729 [FEAT_1_ECX] = { 730 .type = CPUID_FEATURE_WORD, 731 .feat_names = { 732 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 733 "ds-cpl", "vmx", "smx", "est", 734 "tm2", "ssse3", "cid", NULL, 735 "fma", "cx16", "xtpr", "pdcm", 736 NULL, "pcid", "dca", "sse4.1", 737 "sse4.2", "x2apic", "movbe", "popcnt", 738 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 739 "avx", "f16c", "rdrand", "hypervisor", 740 }, 741 .cpuid = { .eax = 1, .reg = R_ECX, }, 742 .tcg_features = TCG_EXT_FEATURES, 743 }, 744 /* Feature names that are already defined on feature_name[] but 745 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 746 * names on feat_names below. They are copied automatically 747 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 748 */ 749 [FEAT_8000_0001_EDX] = { 750 .type = CPUID_FEATURE_WORD, 751 .feat_names = { 752 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 753 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 754 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 755 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 756 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 757 "nx", NULL, "mmxext", NULL /* mmx */, 758 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 759 NULL, "lm", "3dnowext", "3dnow", 760 }, 761 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 762 .tcg_features = TCG_EXT2_FEATURES, 763 }, 764 [FEAT_8000_0001_ECX] = { 765 .type = CPUID_FEATURE_WORD, 766 .feat_names = { 767 "lahf-lm", "cmp-legacy", "svm", "extapic", 768 "cr8legacy", "abm", "sse4a", "misalignsse", 769 "3dnowprefetch", "osvw", "ibs", "xop", 770 "skinit", "wdt", NULL, "lwp", 771 "fma4", "tce", NULL, "nodeid-msr", 772 NULL, "tbm", "topoext", "perfctr-core", 773 "perfctr-nb", NULL, NULL, NULL, 774 NULL, NULL, NULL, NULL, 775 }, 776 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 777 .tcg_features = TCG_EXT3_FEATURES, 778 /* 779 * TOPOEXT is always allowed but can't be enabled blindly by 780 * "-cpu host", as it requires consistent cache topology info 781 * to be provided so it doesn't confuse guests. 
782 */ 783 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 784 }, 785 [FEAT_C000_0001_EDX] = { 786 .type = CPUID_FEATURE_WORD, 787 .feat_names = { 788 NULL, NULL, "xstore", "xstore-en", 789 NULL, NULL, "xcrypt", "xcrypt-en", 790 "ace2", "ace2-en", "phe", "phe-en", 791 "pmm", "pmm-en", NULL, NULL, 792 NULL, NULL, NULL, NULL, 793 NULL, NULL, NULL, NULL, 794 NULL, NULL, NULL, NULL, 795 NULL, NULL, NULL, NULL, 796 }, 797 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 798 .tcg_features = TCG_EXT4_FEATURES, 799 }, 800 [FEAT_KVM] = { 801 .type = CPUID_FEATURE_WORD, 802 .feat_names = { 803 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 804 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 805 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 806 "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", "kvm-msi-ext-dest-id", 807 NULL, NULL, NULL, NULL, 808 NULL, NULL, NULL, NULL, 809 "kvmclock-stable-bit", NULL, NULL, NULL, 810 NULL, NULL, NULL, NULL, 811 }, 812 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 813 .tcg_features = TCG_KVM_FEATURES, 814 }, 815 [FEAT_KVM_HINTS] = { 816 .type = CPUID_FEATURE_WORD, 817 .feat_names = { 818 "kvm-hint-dedicated", NULL, NULL, NULL, 819 NULL, NULL, NULL, NULL, 820 NULL, NULL, NULL, NULL, 821 NULL, NULL, NULL, NULL, 822 NULL, NULL, NULL, NULL, 823 NULL, NULL, NULL, NULL, 824 NULL, NULL, NULL, NULL, 825 NULL, NULL, NULL, NULL, 826 }, 827 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 828 .tcg_features = TCG_KVM_FEATURES, 829 /* 830 * KVM hints aren't auto-enabled by -cpu host, they need to be 831 * explicitly enabled in the command-line. 832 */ 833 .no_autoenable_flags = ~0U, 834 }, 835 /* 836 * .feat_names are commented out for Hyper-V enlightenments because we 837 * don't want to have two different ways for enabling them on QEMU command 838 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require 839 * enabling several feature bits simultaneously, exposing these bits 840 * individually may just confuse guests. 
841 */ 842 [FEAT_HYPERV_EAX] = { 843 .type = CPUID_FEATURE_WORD, 844 .feat_names = { 845 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 846 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 847 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 848 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 849 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 850 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 851 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 852 NULL, NULL, 853 NULL, NULL, NULL, NULL, 854 NULL, NULL, NULL, NULL, 855 NULL, NULL, NULL, NULL, 856 NULL, NULL, NULL, NULL, 857 }, 858 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 859 }, 860 [FEAT_HYPERV_EBX] = { 861 .type = CPUID_FEATURE_WORD, 862 .feat_names = { 863 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 864 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 865 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 866 NULL /* hv_create_port */, NULL /* hv_connect_port */, 867 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 868 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 869 NULL, NULL, 870 NULL, NULL, NULL, NULL, 871 NULL, NULL, NULL, NULL, 872 NULL, NULL, NULL, NULL, 873 NULL, NULL, NULL, NULL, 874 }, 875 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 876 }, 877 [FEAT_HYPERV_EDX] = { 878 .type = CPUID_FEATURE_WORD, 879 .feat_names = { 880 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 881 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 882 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 883 NULL, NULL, 884 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 885 NULL, NULL, NULL, NULL, 886 NULL, NULL, NULL, NULL, 887 NULL, NULL, NULL, NULL, 888 NULL, NULL, NULL, NULL, 889 NULL, NULL, NULL, NULL, 890 }, 891 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 892 }, 893 [FEAT_HV_RECOMM_EAX] = { 894 .type = CPUID_FEATURE_WORD, 895 .feat_names = { 896 NULL /* hv_recommend_pv_as_switch */, 897 NULL /* hv_recommend_pv_tlbflush_local */, 898 NULL /* hv_recommend_pv_tlbflush_remote */, 899 NULL /* hv_recommend_msr_apic_access */, 900 NULL /* hv_recommend_msr_reset */, 901 NULL /* hv_recommend_relaxed_timing */, 902 NULL /* hv_recommend_dma_remapping */, 903 NULL /* hv_recommend_int_remapping */, 904 NULL /* hv_recommend_x2apic_msrs */, 905 NULL /* hv_recommend_autoeoi_deprecation */, 906 NULL /* hv_recommend_pv_ipi */, 907 NULL /* hv_recommend_ex_hypercalls */, 908 NULL /* hv_hypervisor_is_nested */, 909 NULL /* hv_recommend_int_mbec */, 910 NULL /* hv_recommend_evmcs */, 911 NULL, 912 NULL, NULL, NULL, NULL, 913 NULL, NULL, NULL, NULL, 914 NULL, NULL, NULL, NULL, 915 NULL, NULL, NULL, NULL, 916 }, 917 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 918 }, 919 [FEAT_HV_NESTED_EAX] = { 920 .type = CPUID_FEATURE_WORD, 921 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 922 }, 923 [FEAT_SVM] = { 924 .type = CPUID_FEATURE_WORD, 925 .feat_names = { 926 "npt", "lbrv", "svm-lock", "nrip-save", 927 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 928 NULL, NULL, "pause-filter", NULL, 929 "pfthreshold", NULL, NULL, NULL, 930 NULL, NULL, NULL, NULL, 931 NULL, NULL, NULL, NULL, 932 NULL, NULL, NULL, NULL, 933 NULL, NULL, NULL, NULL, 934 }, 935 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 936 .tcg_features = TCG_SVM_FEATURES, 937 }, 938 [FEAT_7_0_EBX] = { 939 .type = CPUID_FEATURE_WORD, 940 
.feat_names = { 941 "fsgsbase", "tsc-adjust", NULL, "bmi1", 942 "hle", "avx2", NULL, "smep", 943 "bmi2", "erms", "invpcid", "rtm", 944 NULL, NULL, "mpx", NULL, 945 "avx512f", "avx512dq", "rdseed", "adx", 946 "smap", "avx512ifma", "pcommit", "clflushopt", 947 "clwb", "intel-pt", "avx512pf", "avx512er", 948 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 949 }, 950 .cpuid = { 951 .eax = 7, 952 .needs_ecx = true, .ecx = 0, 953 .reg = R_EBX, 954 }, 955 .tcg_features = TCG_7_0_EBX_FEATURES, 956 }, 957 [FEAT_7_0_ECX] = { 958 .type = CPUID_FEATURE_WORD, 959 .feat_names = { 960 NULL, "avx512vbmi", "umip", "pku", 961 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, 962 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 963 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 964 "la57", NULL, NULL, NULL, 965 NULL, NULL, "rdpid", NULL, 966 NULL, "cldemote", NULL, "movdiri", 967 "movdir64b", NULL, NULL, NULL, 968 }, 969 .cpuid = { 970 .eax = 7, 971 .needs_ecx = true, .ecx = 0, 972 .reg = R_ECX, 973 }, 974 .tcg_features = TCG_7_0_ECX_FEATURES, 975 }, 976 [FEAT_7_0_EDX] = { 977 .type = CPUID_FEATURE_WORD, 978 .feat_names = { 979 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 980 "fsrm", NULL, NULL, NULL, 981 "avx512-vp2intersect", NULL, "md-clear", NULL, 982 NULL, NULL, "serialize", NULL, 983 "tsx-ldtrk", NULL, NULL /* pconfig */, NULL, 984 NULL, NULL, NULL, "avx512-fp16", 985 NULL, NULL, "spec-ctrl", "stibp", 986 NULL, "arch-capabilities", "core-capability", "ssbd", 987 }, 988 .cpuid = { 989 .eax = 7, 990 .needs_ecx = true, .ecx = 0, 991 .reg = R_EDX, 992 }, 993 .tcg_features = TCG_7_0_EDX_FEATURES, 994 }, 995 [FEAT_7_1_EAX] = { 996 .type = CPUID_FEATURE_WORD, 997 .feat_names = { 998 NULL, NULL, NULL, NULL, 999 NULL, "avx512-bf16", NULL, NULL, 1000 NULL, NULL, NULL, NULL, 1001 NULL, NULL, NULL, NULL, 1002 NULL, NULL, NULL, NULL, 1003 NULL, NULL, NULL, NULL, 1004 NULL, NULL, NULL, NULL, 1005 NULL, NULL, NULL, NULL, 1006 }, 1007 .cpuid = { 1008 .eax = 7, 1009 .needs_ecx = true, .ecx = 1, 1010 .reg = R_EAX, 1011 }, 1012 .tcg_features = TCG_7_1_EAX_FEATURES, 1013 }, 1014 [FEAT_8000_0007_EDX] = { 1015 .type = CPUID_FEATURE_WORD, 1016 .feat_names = { 1017 NULL, NULL, NULL, NULL, 1018 NULL, NULL, NULL, NULL, 1019 "invtsc", NULL, NULL, NULL, 1020 NULL, NULL, NULL, NULL, 1021 NULL, NULL, NULL, NULL, 1022 NULL, NULL, NULL, NULL, 1023 NULL, NULL, NULL, NULL, 1024 NULL, NULL, NULL, NULL, 1025 }, 1026 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1027 .tcg_features = TCG_APM_FEATURES, 1028 .unmigratable_flags = CPUID_APM_INVTSC, 1029 }, 1030 [FEAT_8000_0008_EBX] = { 1031 .type = CPUID_FEATURE_WORD, 1032 .feat_names = { 1033 "clzero", NULL, "xsaveerptr", NULL, 1034 NULL, NULL, NULL, NULL, 1035 NULL, "wbnoinvd", NULL, NULL, 1036 "ibpb", NULL, NULL, "amd-stibp", 1037 NULL, NULL, NULL, NULL, 1038 NULL, NULL, NULL, NULL, 1039 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1040 NULL, NULL, NULL, NULL, 1041 }, 1042 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1043 .tcg_features = 0, 1044 .unmigratable_flags = 0, 1045 }, 1046 [FEAT_XSAVE] = { 1047 .type = CPUID_FEATURE_WORD, 1048 .feat_names = { 1049 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1050 NULL, NULL, NULL, NULL, 1051 NULL, NULL, NULL, NULL, 1052 NULL, NULL, NULL, NULL, 1053 NULL, NULL, NULL, NULL, 1054 NULL, NULL, NULL, NULL, 1055 NULL, NULL, NULL, NULL, 1056 NULL, NULL, NULL, NULL, 1057 }, 1058 .cpuid = { 1059 .eax = 0xd, 1060 .needs_ecx = true, .ecx = 1, 1061 .reg = R_EAX, 1062 }, 1063 .tcg_features = TCG_XSAVE_FEATURES, 1064 }, 1065 [FEAT_6_EAX] = { 1066 .type = 
CPUID_FEATURE_WORD, 1067 .feat_names = { 1068 NULL, NULL, "arat", NULL, 1069 NULL, NULL, NULL, NULL, 1070 NULL, NULL, NULL, NULL, 1071 NULL, NULL, NULL, NULL, 1072 NULL, NULL, NULL, NULL, 1073 NULL, NULL, NULL, NULL, 1074 NULL, NULL, NULL, NULL, 1075 NULL, NULL, NULL, NULL, 1076 }, 1077 .cpuid = { .eax = 6, .reg = R_EAX, }, 1078 .tcg_features = TCG_6_EAX_FEATURES, 1079 }, 1080 [FEAT_XSAVE_COMP_LO] = { 1081 .type = CPUID_FEATURE_WORD, 1082 .cpuid = { 1083 .eax = 0xD, 1084 .needs_ecx = true, .ecx = 0, 1085 .reg = R_EAX, 1086 }, 1087 .tcg_features = ~0U, 1088 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1089 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1090 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1091 XSTATE_PKRU_MASK, 1092 }, 1093 [FEAT_XSAVE_COMP_HI] = { 1094 .type = CPUID_FEATURE_WORD, 1095 .cpuid = { 1096 .eax = 0xD, 1097 .needs_ecx = true, .ecx = 0, 1098 .reg = R_EDX, 1099 }, 1100 .tcg_features = ~0U, 1101 }, 1102 /*Below are MSR exposed features*/ 1103 [FEAT_ARCH_CAPABILITIES] = { 1104 .type = MSR_FEATURE_WORD, 1105 .feat_names = { 1106 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1107 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", 1108 "taa-no", NULL, NULL, NULL, 1109 NULL, NULL, NULL, NULL, 1110 NULL, NULL, NULL, NULL, 1111 NULL, NULL, NULL, NULL, 1112 NULL, NULL, NULL, NULL, 1113 NULL, NULL, NULL, NULL, 1114 }, 1115 .msr = { 1116 .index = MSR_IA32_ARCH_CAPABILITIES, 1117 }, 1118 }, 1119 [FEAT_CORE_CAPABILITY] = { 1120 .type = MSR_FEATURE_WORD, 1121 .feat_names = { 1122 NULL, NULL, NULL, NULL, 1123 NULL, "split-lock-detect", NULL, NULL, 1124 NULL, NULL, NULL, NULL, 1125 NULL, NULL, NULL, NULL, 1126 NULL, NULL, NULL, NULL, 1127 NULL, NULL, NULL, NULL, 1128 NULL, NULL, NULL, NULL, 1129 NULL, NULL, NULL, NULL, 1130 }, 1131 .msr = { 1132 .index = MSR_IA32_CORE_CAPABILITY, 1133 }, 1134 }, 1135 [FEAT_PERF_CAPABILITIES] = { 1136 .type = MSR_FEATURE_WORD, 1137 .feat_names = { 1138 NULL, NULL, NULL, NULL, 1139 NULL, NULL, NULL, NULL, 1140 NULL, NULL, NULL, NULL, 1141 NULL, "full-width-write", NULL, NULL, 1142 NULL, NULL, NULL, NULL, 1143 NULL, NULL, NULL, NULL, 1144 NULL, NULL, NULL, NULL, 1145 NULL, NULL, NULL, NULL, 1146 }, 1147 .msr = { 1148 .index = MSR_IA32_PERF_CAPABILITIES, 1149 }, 1150 }, 1151 1152 [FEAT_VMX_PROCBASED_CTLS] = { 1153 .type = MSR_FEATURE_WORD, 1154 .feat_names = { 1155 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", 1156 NULL, NULL, NULL, "vmx-hlt-exit", 1157 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", 1158 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", 1159 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", 1160 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", 1161 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", 1162 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", 1163 }, 1164 .msr = { 1165 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1166 } 1167 }, 1168 1169 [FEAT_VMX_SECONDARY_CTLS] = { 1170 .type = MSR_FEATURE_WORD, 1171 .feat_names = { 1172 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", 1173 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", 1174 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", 1175 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", 1176 "vmx-rdseed-exit", "vmx-pml", NULL, NULL, 1177 "vmx-xsaves", NULL, NULL, NULL, 1178 NULL, NULL, NULL, NULL, 1179 NULL, NULL, NULL, NULL, 1180 }, 1181 .msr = { 1182 .index = 
MSR_IA32_VMX_PROCBASED_CTLS2, 1183 } 1184 }, 1185 1186 [FEAT_VMX_PINBASED_CTLS] = { 1187 .type = MSR_FEATURE_WORD, 1188 .feat_names = { 1189 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", 1190 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", 1191 NULL, NULL, NULL, NULL, 1192 NULL, NULL, NULL, NULL, 1193 NULL, NULL, NULL, NULL, 1194 NULL, NULL, NULL, NULL, 1195 NULL, NULL, NULL, NULL, 1196 NULL, NULL, NULL, NULL, 1197 }, 1198 .msr = { 1199 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1200 } 1201 }, 1202 1203 [FEAT_VMX_EXIT_CTLS] = { 1204 .type = MSR_FEATURE_WORD, 1205 /* 1206 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from 1207 * the LM CPUID bit. 1208 */ 1209 .feat_names = { 1210 NULL, NULL, "vmx-exit-nosave-debugctl", NULL, 1211 NULL, NULL, NULL, NULL, 1212 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, 1213 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", 1214 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", 1215 "vmx-exit-save-efer", "vmx-exit-load-efer", 1216 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", 1217 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, 1218 NULL, NULL, NULL, NULL, 1219 }, 1220 .msr = { 1221 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, 1222 } 1223 }, 1224 1225 [FEAT_VMX_ENTRY_CTLS] = { 1226 .type = MSR_FEATURE_WORD, 1227 .feat_names = { 1228 NULL, NULL, "vmx-entry-noload-debugctl", NULL, 1229 NULL, NULL, NULL, NULL, 1230 NULL, "vmx-entry-ia32e-mode", NULL, NULL, 1231 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", 1232 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, 1233 NULL, NULL, NULL, NULL, 1234 NULL, NULL, NULL, NULL, 1235 NULL, NULL, NULL, NULL, 1236 }, 1237 .msr = { 1238 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1239 } 1240 }, 1241 1242 [FEAT_VMX_MISC] = { 1243 .type = MSR_FEATURE_WORD, 1244 .feat_names = { 1245 NULL, NULL, NULL, NULL, 1246 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", 1247 "vmx-activity-wait-sipi", NULL, NULL, NULL, 1248 NULL, NULL, NULL, NULL, 1249 NULL, NULL, NULL, NULL, 1250 NULL, NULL, NULL, NULL, 1251 NULL, NULL, NULL, NULL, 1252 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, 1253 }, 1254 .msr = { 1255 .index = MSR_IA32_VMX_MISC, 1256 } 1257 }, 1258 1259 [FEAT_VMX_EPT_VPID_CAPS] = { 1260 .type = MSR_FEATURE_WORD, 1261 .feat_names = { 1262 "vmx-ept-execonly", NULL, NULL, NULL, 1263 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", 1264 NULL, NULL, NULL, NULL, 1265 NULL, NULL, NULL, NULL, 1266 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, 1267 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, 1268 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, 1269 NULL, NULL, NULL, NULL, 1270 "vmx-invvpid", NULL, NULL, NULL, 1271 NULL, NULL, NULL, NULL, 1272 "vmx-invvpid-single-addr", "vmx-invept-single-context", 1273 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", 1274 NULL, NULL, NULL, NULL, 1275 NULL, NULL, NULL, NULL, 1276 NULL, NULL, NULL, NULL, 1277 NULL, NULL, NULL, NULL, 1278 NULL, NULL, NULL, NULL, 1279 }, 1280 .msr = { 1281 .index = MSR_IA32_VMX_EPT_VPID_CAP, 1282 } 1283 }, 1284 1285 [FEAT_VMX_BASIC] = { 1286 .type = MSR_FEATURE_WORD, 1287 .feat_names = { 1288 [54] = "vmx-ins-outs", 1289 [55] = "vmx-true-ctls", 1290 }, 1291 .msr = { 1292 .index = MSR_IA32_VMX_BASIC, 1293 }, 1294 /* Just to be safe - we don't support setting the MSEG version field. 
*/ 1295 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, 1296 }, 1297 1298 [FEAT_VMX_VMFUNC] = { 1299 .type = MSR_FEATURE_WORD, 1300 .feat_names = { 1301 [0] = "vmx-eptp-switching", 1302 }, 1303 .msr = { 1304 .index = MSR_IA32_VMX_VMFUNC, 1305 } 1306 }, 1307 1308 [FEAT_14_0_ECX] = { 1309 .type = CPUID_FEATURE_WORD, 1310 .feat_names = { 1311 NULL, NULL, NULL, NULL, 1312 NULL, NULL, NULL, NULL, 1313 NULL, NULL, NULL, NULL, 1314 NULL, NULL, NULL, NULL, 1315 NULL, NULL, NULL, NULL, 1316 NULL, NULL, NULL, NULL, 1317 NULL, NULL, NULL, NULL, 1318 NULL, NULL, NULL, "intel-pt-lip", 1319 }, 1320 .cpuid = { 1321 .eax = 0x14, 1322 .needs_ecx = true, .ecx = 0, 1323 .reg = R_ECX, 1324 }, 1325 .tcg_features = TCG_14_0_ECX_FEATURES, 1326 }, 1327 1328 }; 1329 1330 typedef struct FeatureMask { 1331 FeatureWord index; 1332 uint64_t mask; 1333 } FeatureMask; 1334 1335 typedef struct FeatureDep { 1336 FeatureMask from, to; 1337 } FeatureDep; 1338 1339 static FeatureDep feature_dependencies[] = { 1340 { 1341 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES }, 1342 .to = { FEAT_ARCH_CAPABILITIES, ~0ull }, 1343 }, 1344 { 1345 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY }, 1346 .to = { FEAT_CORE_CAPABILITY, ~0ull }, 1347 }, 1348 { 1349 .from = { FEAT_1_ECX, CPUID_EXT_PDCM }, 1350 .to = { FEAT_PERF_CAPABILITIES, ~0ull }, 1351 }, 1352 { 1353 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1354 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull }, 1355 }, 1356 { 1357 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1358 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull }, 1359 }, 1360 { 1361 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1362 .to = { FEAT_VMX_EXIT_CTLS, ~0ull }, 1363 }, 1364 { 1365 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1366 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull }, 1367 }, 1368 { 1369 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1370 .to = { FEAT_VMX_MISC, ~0ull }, 1371 }, 1372 { 1373 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1374 .to = { FEAT_VMX_BASIC, ~0ull }, 1375 }, 1376 { 1377 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, 1378 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE }, 1379 }, 1380 { 1381 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS }, 1382 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull }, 1383 }, 1384 { 1385 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES }, 1386 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES }, 1387 }, 1388 { 1389 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND }, 1390 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING }, 1391 }, 1392 { 1393 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, 1394 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, 1395 }, 1396 { 1397 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, 1398 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, 1399 }, 1400 { 1401 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT }, 1402 .to = { FEAT_14_0_ECX, ~0ull }, 1403 }, 1404 { 1405 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP }, 1406 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP }, 1407 }, 1408 { 1409 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1410 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull }, 1411 }, 1412 { 1413 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1414 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST }, 1415 }, 1416 { 1417 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID }, 1418 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 }, 1419 }, 1420 { 1421 .from = { FEAT_VMX_SECONDARY_CTLS, 
VMX_SECONDARY_EXEC_ENABLE_VMFUNC }, 1422 .to = { FEAT_VMX_VMFUNC, ~0ull }, 1423 }, 1424 { 1425 .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM }, 1426 .to = { FEAT_SVM, ~0ull }, 1427 }, 1428 }; 1429 1430 typedef struct X86RegisterInfo32 { 1431 /* Name of register */ 1432 const char *name; 1433 /* QAPI enum value register */ 1434 X86CPURegister32 qapi_enum; 1435 } X86RegisterInfo32; 1436 1437 #define REGISTER(reg) \ 1438 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1439 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1440 REGISTER(EAX), 1441 REGISTER(ECX), 1442 REGISTER(EDX), 1443 REGISTER(EBX), 1444 REGISTER(ESP), 1445 REGISTER(EBP), 1446 REGISTER(ESI), 1447 REGISTER(EDI), 1448 }; 1449 #undef REGISTER 1450 1451 typedef struct ExtSaveArea { 1452 uint32_t feature, bits; 1453 uint32_t offset, size; 1454 } ExtSaveArea; 1455 1456 static const ExtSaveArea x86_ext_save_areas[] = { 1457 [XSTATE_FP_BIT] = { 1458 /* x87 FP state component is always enabled if XSAVE is supported */ 1459 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1460 /* x87 state is in the legacy region of the XSAVE area */ 1461 .offset = 0, 1462 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1463 }, 1464 [XSTATE_SSE_BIT] = { 1465 /* SSE state component is always enabled if XSAVE is supported */ 1466 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1467 /* SSE state is in the legacy region of the XSAVE area */ 1468 .offset = 0, 1469 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1470 }, 1471 [XSTATE_YMM_BIT] = 1472 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 1473 .offset = offsetof(X86XSaveArea, avx_state), 1474 .size = sizeof(XSaveAVX) }, 1475 [XSTATE_BNDREGS_BIT] = 1476 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1477 .offset = offsetof(X86XSaveArea, bndreg_state), 1478 .size = sizeof(XSaveBNDREG) }, 1479 [XSTATE_BNDCSR_BIT] = 1480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1481 .offset = offsetof(X86XSaveArea, bndcsr_state), 1482 .size = sizeof(XSaveBNDCSR) }, 1483 [XSTATE_OPMASK_BIT] = 1484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1485 .offset = offsetof(X86XSaveArea, opmask_state), 1486 .size = sizeof(XSaveOpmask) }, 1487 [XSTATE_ZMM_Hi256_BIT] = 1488 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1489 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 1490 .size = sizeof(XSaveZMM_Hi256) }, 1491 [XSTATE_Hi16_ZMM_BIT] = 1492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1493 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 1494 .size = sizeof(XSaveHi16_ZMM) }, 1495 [XSTATE_PKRU_BIT] = 1496 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 1497 .offset = offsetof(X86XSaveArea, pkru_state), 1498 .size = sizeof(XSavePKRU) }, 1499 }; 1500 1501 static uint32_t xsave_area_size(uint64_t mask) 1502 { 1503 int i; 1504 uint64_t ret = 0; 1505 1506 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 1507 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 1508 if ((mask >> i) & 1) { 1509 ret = MAX(ret, esa->offset + esa->size); 1510 } 1511 } 1512 return ret; 1513 } 1514 1515 static inline bool accel_uses_host_cpuid(void) 1516 { 1517 return kvm_enabled() || hvf_enabled(); 1518 } 1519 1520 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 1521 { 1522 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 1523 cpu->env.features[FEAT_XSAVE_COMP_LO]; 1524 } 1525 1526 /* Return name of 32-bit register, from a R_* constant */ 1527 static const char *get_register_name_32(unsigned int reg) 
1528 { 1529 if (reg >= CPU_NB_REGS32) { 1530 return NULL; 1531 } 1532 return x86_reg_info_32[reg].name; 1533 } 1534 1535 /* 1536 * Returns the set of feature flags that are supported and migratable by 1537 * QEMU, for a given FeatureWord. 1538 */ 1539 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) 1540 { 1541 FeatureWordInfo *wi = &feature_word_info[w]; 1542 uint64_t r = 0; 1543 int i; 1544 1545 for (i = 0; i < 64; i++) { 1546 uint64_t f = 1ULL << i; 1547 1548 /* If the feature name is known, it is implicitly considered migratable, 1549 * unless it is explicitly set in unmigratable_flags */ 1550 if ((wi->migratable_flags & f) || 1551 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1552 r |= f; 1553 } 1554 } 1555 return r; 1556 } 1557 1558 void host_cpuid(uint32_t function, uint32_t count, 1559 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1560 { 1561 uint32_t vec[4]; 1562 1563 #ifdef __x86_64__ 1564 asm volatile("cpuid" 1565 : "=a"(vec[0]), "=b"(vec[1]), 1566 "=c"(vec[2]), "=d"(vec[3]) 1567 : "0"(function), "c"(count) : "cc"); 1568 #elif defined(__i386__) 1569 asm volatile("pusha \n\t" 1570 "cpuid \n\t" 1571 "mov %%eax, 0(%2) \n\t" 1572 "mov %%ebx, 4(%2) \n\t" 1573 "mov %%ecx, 8(%2) \n\t" 1574 "mov %%edx, 12(%2) \n\t" 1575 "popa" 1576 : : "a"(function), "c"(count), "S"(vec) 1577 : "memory", "cc"); 1578 #else 1579 abort(); 1580 #endif 1581 1582 if (eax) 1583 *eax = vec[0]; 1584 if (ebx) 1585 *ebx = vec[1]; 1586 if (ecx) 1587 *ecx = vec[2]; 1588 if (edx) 1589 *edx = vec[3]; 1590 } 1591 1592 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1593 { 1594 uint32_t eax, ebx, ecx, edx; 1595 1596 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1597 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1598 1599 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1600 if (family) { 1601 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1602 } 1603 if (model) { 1604 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1605 } 1606 if (stepping) { 1607 *stepping = eax & 0x0F; 1608 } 1609 } 1610 1611 /* CPU class name definitions: */ 1612 1613 /* Return type name for a given CPU model name 1614 * Caller is responsible for freeing the returned string. 
1615 */ 1616 static char *x86_cpu_type_name(const char *model_name) 1617 { 1618 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1619 } 1620 1621 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1622 { 1623 g_autofree char *typename = x86_cpu_type_name(cpu_model); 1624 return object_class_by_name(typename); 1625 } 1626 1627 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1628 { 1629 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1630 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1631 return g_strndup(class_name, 1632 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1633 } 1634 1635 typedef struct PropValue { 1636 const char *prop, *value; 1637 } PropValue; 1638 1639 typedef struct X86CPUVersionDefinition { 1640 X86CPUVersion version; 1641 const char *alias; 1642 const char *note; 1643 PropValue *props; 1644 } X86CPUVersionDefinition; 1645 1646 /* Base definition for a CPU model */ 1647 typedef struct X86CPUDefinition { 1648 const char *name; 1649 uint32_t level; 1650 uint32_t xlevel; 1651 /* vendor is zero-terminated, 12 character ASCII string */ 1652 char vendor[CPUID_VENDOR_SZ + 1]; 1653 int family; 1654 int model; 1655 int stepping; 1656 FeatureWordArray features; 1657 const char *model_id; 1658 CPUCaches *cache_info; 1659 /* 1660 * Definitions for alternative versions of CPU model. 1661 * List is terminated by item with version == 0. 1662 * If NULL, version 1 will be registered automatically. 1663 */ 1664 const X86CPUVersionDefinition *versions; 1665 const char *deprecation_note; 1666 } X86CPUDefinition; 1667 1668 /* Reference to a specific CPU model version */ 1669 struct X86CPUModel { 1670 /* Base CPU definition */ 1671 X86CPUDefinition *cpudef; 1672 /* CPU model version */ 1673 X86CPUVersion version; 1674 const char *note; 1675 /* 1676 * If true, this is an alias CPU model. 
1677 * This matters only for "-cpu help" and query-cpu-definitions 1678 */ 1679 bool is_alias; 1680 }; 1681 1682 /* Get full model name for CPU version */ 1683 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1684 X86CPUVersion version) 1685 { 1686 assert(version > 0); 1687 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1688 } 1689 1690 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1691 { 1692 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1693 static const X86CPUVersionDefinition default_version_list[] = { 1694 { 1 }, 1695 { /* end of list */ } 1696 }; 1697 1698 return def->versions ?: default_version_list; 1699 } 1700 1701 static CPUCaches epyc_cache_info = { 1702 .l1d_cache = &(CPUCacheInfo) { 1703 .type = DATA_CACHE, 1704 .level = 1, 1705 .size = 32 * KiB, 1706 .line_size = 64, 1707 .associativity = 8, 1708 .partitions = 1, 1709 .sets = 64, 1710 .lines_per_tag = 1, 1711 .self_init = 1, 1712 .no_invd_sharing = true, 1713 }, 1714 .l1i_cache = &(CPUCacheInfo) { 1715 .type = INSTRUCTION_CACHE, 1716 .level = 1, 1717 .size = 64 * KiB, 1718 .line_size = 64, 1719 .associativity = 4, 1720 .partitions = 1, 1721 .sets = 256, 1722 .lines_per_tag = 1, 1723 .self_init = 1, 1724 .no_invd_sharing = true, 1725 }, 1726 .l2_cache = &(CPUCacheInfo) { 1727 .type = UNIFIED_CACHE, 1728 .level = 2, 1729 .size = 512 * KiB, 1730 .line_size = 64, 1731 .associativity = 8, 1732 .partitions = 1, 1733 .sets = 1024, 1734 .lines_per_tag = 1, 1735 }, 1736 .l3_cache = &(CPUCacheInfo) { 1737 .type = UNIFIED_CACHE, 1738 .level = 3, 1739 .size = 8 * MiB, 1740 .line_size = 64, 1741 .associativity = 16, 1742 .partitions = 1, 1743 .sets = 8192, 1744 .lines_per_tag = 1, 1745 .self_init = true, 1746 .inclusive = true, 1747 .complex_indexing = true, 1748 }, 1749 }; 1750 1751 static CPUCaches epyc_rome_cache_info = { 1752 .l1d_cache = &(CPUCacheInfo) { 1753 .type = DATA_CACHE, 1754 .level = 1, 1755 .size = 32 * KiB, 1756 .line_size = 64, 1757 .associativity = 8, 1758 .partitions = 1, 1759 .sets = 64, 1760 .lines_per_tag = 1, 1761 .self_init = 1, 1762 .no_invd_sharing = true, 1763 }, 1764 .l1i_cache = &(CPUCacheInfo) { 1765 .type = INSTRUCTION_CACHE, 1766 .level = 1, 1767 .size = 32 * KiB, 1768 .line_size = 64, 1769 .associativity = 8, 1770 .partitions = 1, 1771 .sets = 64, 1772 .lines_per_tag = 1, 1773 .self_init = 1, 1774 .no_invd_sharing = true, 1775 }, 1776 .l2_cache = &(CPUCacheInfo) { 1777 .type = UNIFIED_CACHE, 1778 .level = 2, 1779 .size = 512 * KiB, 1780 .line_size = 64, 1781 .associativity = 8, 1782 .partitions = 1, 1783 .sets = 1024, 1784 .lines_per_tag = 1, 1785 }, 1786 .l3_cache = &(CPUCacheInfo) { 1787 .type = UNIFIED_CACHE, 1788 .level = 3, 1789 .size = 16 * MiB, 1790 .line_size = 64, 1791 .associativity = 16, 1792 .partitions = 1, 1793 .sets = 16384, 1794 .lines_per_tag = 1, 1795 .self_init = true, 1796 .inclusive = true, 1797 .complex_indexing = true, 1798 }, 1799 }; 1800 1801 /* The following VMX features are not supported by KVM and are left out in the 1802 * CPU definitions: 1803 * 1804 * Dual-monitor support (all processors) 1805 * Entry to SMM 1806 * Deactivate dual-monitor treatment 1807 * Number of CR3-target values 1808 * Shutdown activity state 1809 * Wait-for-SIPI activity state 1810 * PAUSE-loop exiting (Westmere and newer) 1811 * EPT-violation #VE (Broadwell and newer) 1812 * Inject event with insn length=0 (Skylake and newer) 1813 * Conceal non-root operation from PT 1814 * Conceal VM exits from PT 1815 * 
Conceal VM entries from PT 1816 * Enable ENCLS exiting 1817 * Mode-based execute control (XS/XU) 1818 * TSC scaling (Skylake Server and newer) 1819 * GPA translation for PT (IceLake and newer) 1820 * User wait and pause 1821 * ENCLV exiting 1822 * Load IA32_RTIT_CTL 1823 * Clear IA32_RTIT_CTL 1824 * Advanced VM-exit information for EPT violations 1825 * Sub-page write permissions 1826 * PT in VMX operation 1827 */ 1828 1829 static X86CPUDefinition builtin_x86_defs[] = { 1830 { 1831 .name = "qemu64", 1832 .level = 0xd, 1833 .vendor = CPUID_VENDOR_AMD, 1834 .family = 6, 1835 .model = 6, 1836 .stepping = 3, 1837 .features[FEAT_1_EDX] = 1838 PPRO_FEATURES | 1839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1840 CPUID_PSE36, 1841 .features[FEAT_1_ECX] = 1842 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1843 .features[FEAT_8000_0001_EDX] = 1844 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1845 .features[FEAT_8000_0001_ECX] = 1846 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1847 .xlevel = 0x8000000A, 1848 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1849 }, 1850 { 1851 .name = "phenom", 1852 .level = 5, 1853 .vendor = CPUID_VENDOR_AMD, 1854 .family = 16, 1855 .model = 2, 1856 .stepping = 3, 1857 /* Missing: CPUID_HT */ 1858 .features[FEAT_1_EDX] = 1859 PPRO_FEATURES | 1860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1861 CPUID_PSE36 | CPUID_VME, 1862 .features[FEAT_1_ECX] = 1863 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1864 CPUID_EXT_POPCNT, 1865 .features[FEAT_8000_0001_EDX] = 1866 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1867 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1868 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1869 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1870 CPUID_EXT3_CR8LEG, 1871 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1872 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1873 .features[FEAT_8000_0001_ECX] = 1874 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1875 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1876 /* Missing: CPUID_SVM_LBRV */ 1877 .features[FEAT_SVM] = 1878 CPUID_SVM_NPT, 1879 .xlevel = 0x8000001A, 1880 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1881 }, 1882 { 1883 .name = "core2duo", 1884 .level = 10, 1885 .vendor = CPUID_VENDOR_INTEL, 1886 .family = 6, 1887 .model = 15, 1888 .stepping = 11, 1889 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1890 .features[FEAT_1_EDX] = 1891 PPRO_FEATURES | 1892 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1893 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1894 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1895 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1896 .features[FEAT_1_ECX] = 1897 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1898 CPUID_EXT_CX16, 1899 .features[FEAT_8000_0001_EDX] = 1900 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1901 .features[FEAT_8000_0001_ECX] = 1902 CPUID_EXT3_LAHF_LM, 1903 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1904 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1905 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1906 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1907 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1908 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1909 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1910 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1911 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1912 VMX_CPU_BASED_RDPMC_EXITING | 
VMX_CPU_BASED_RDTSC_EXITING | 1913 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1914 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1915 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1916 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1917 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1918 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1919 .features[FEAT_VMX_SECONDARY_CTLS] = 1920 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1921 .xlevel = 0x80000008, 1922 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1923 }, 1924 { 1925 .name = "kvm64", 1926 .level = 0xd, 1927 .vendor = CPUID_VENDOR_INTEL, 1928 .family = 15, 1929 .model = 6, 1930 .stepping = 1, 1931 /* Missing: CPUID_HT */ 1932 .features[FEAT_1_EDX] = 1933 PPRO_FEATURES | CPUID_VME | 1934 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1935 CPUID_PSE36, 1936 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1937 .features[FEAT_1_ECX] = 1938 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1939 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1940 .features[FEAT_8000_0001_EDX] = 1941 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1942 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1943 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1944 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1945 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1946 .features[FEAT_8000_0001_ECX] = 1947 0, 1948 /* VMX features from Cedar Mill/Prescott */ 1949 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1950 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1951 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1952 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1953 VMX_PIN_BASED_NMI_EXITING, 1954 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1955 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1956 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1957 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1958 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1959 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1960 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1961 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1962 .xlevel = 0x80000008, 1963 .model_id = "Common KVM processor" 1964 }, 1965 { 1966 .name = "qemu32", 1967 .level = 4, 1968 .vendor = CPUID_VENDOR_INTEL, 1969 .family = 6, 1970 .model = 6, 1971 .stepping = 3, 1972 .features[FEAT_1_EDX] = 1973 PPRO_FEATURES, 1974 .features[FEAT_1_ECX] = 1975 CPUID_EXT_SSE3, 1976 .xlevel = 0x80000004, 1977 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1978 }, 1979 { 1980 .name = "kvm32", 1981 .level = 5, 1982 .vendor = CPUID_VENDOR_INTEL, 1983 .family = 15, 1984 .model = 6, 1985 .stepping = 1, 1986 .features[FEAT_1_EDX] = 1987 PPRO_FEATURES | CPUID_VME | 1988 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1989 .features[FEAT_1_ECX] = 1990 CPUID_EXT_SSE3, 1991 .features[FEAT_8000_0001_ECX] = 1992 0, 1993 /* VMX features from Yonah */ 1994 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1995 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1996 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1997 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1998 VMX_PIN_BASED_NMI_EXITING, 1999 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2000 
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2001 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2002 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2003 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2004 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2005 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2006 .xlevel = 0x80000008, 2007 .model_id = "Common 32-bit KVM processor" 2008 }, 2009 { 2010 .name = "coreduo", 2011 .level = 10, 2012 .vendor = CPUID_VENDOR_INTEL, 2013 .family = 6, 2014 .model = 14, 2015 .stepping = 8, 2016 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2017 .features[FEAT_1_EDX] = 2018 PPRO_FEATURES | CPUID_VME | 2019 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2020 CPUID_SS, 2021 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2022 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2023 .features[FEAT_1_ECX] = 2024 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2025 .features[FEAT_8000_0001_EDX] = 2026 CPUID_EXT2_NX, 2027 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2028 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2029 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2030 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2031 VMX_PIN_BASED_NMI_EXITING, 2032 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2033 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2034 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2035 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2036 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2037 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2038 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2039 .xlevel = 0x80000008, 2040 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2041 }, 2042 { 2043 .name = "486", 2044 .level = 1, 2045 .vendor = CPUID_VENDOR_INTEL, 2046 .family = 4, 2047 .model = 8, 2048 .stepping = 0, 2049 .features[FEAT_1_EDX] = 2050 I486_FEATURES, 2051 .xlevel = 0, 2052 .model_id = "", 2053 }, 2054 { 2055 .name = "pentium", 2056 .level = 1, 2057 .vendor = CPUID_VENDOR_INTEL, 2058 .family = 5, 2059 .model = 4, 2060 .stepping = 3, 2061 .features[FEAT_1_EDX] = 2062 PENTIUM_FEATURES, 2063 .xlevel = 0, 2064 .model_id = "", 2065 }, 2066 { 2067 .name = "pentium2", 2068 .level = 2, 2069 .vendor = CPUID_VENDOR_INTEL, 2070 .family = 6, 2071 .model = 5, 2072 .stepping = 2, 2073 .features[FEAT_1_EDX] = 2074 PENTIUM2_FEATURES, 2075 .xlevel = 0, 2076 .model_id = "", 2077 }, 2078 { 2079 .name = "pentium3", 2080 .level = 3, 2081 .vendor = CPUID_VENDOR_INTEL, 2082 .family = 6, 2083 .model = 7, 2084 .stepping = 3, 2085 .features[FEAT_1_EDX] = 2086 PENTIUM3_FEATURES, 2087 .xlevel = 0, 2088 .model_id = "", 2089 }, 2090 { 2091 .name = "athlon", 2092 .level = 2, 2093 .vendor = CPUID_VENDOR_AMD, 2094 .family = 6, 2095 .model = 2, 2096 .stepping = 3, 2097 .features[FEAT_1_EDX] = 2098 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2099 CPUID_MCA, 2100 .features[FEAT_8000_0001_EDX] = 2101 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2102 .xlevel = 0x80000008, 2103 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2104 }, 2105 { 2106 .name = "n270", 2107 .level = 10, 2108 .vendor = CPUID_VENDOR_INTEL, 2109 .family = 6, 2110 .model = 28, 2111 .stepping = 2, 2112 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2113 .features[FEAT_1_EDX] = 2114 PPRO_FEATURES | 2115 
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2116 CPUID_ACPI | CPUID_SS, 2117 /* Some CPUs got no CPUID_SEP */ 2118 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2119 * CPUID_EXT_XTPR */ 2120 .features[FEAT_1_ECX] = 2121 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2122 CPUID_EXT_MOVBE, 2123 .features[FEAT_8000_0001_EDX] = 2124 CPUID_EXT2_NX, 2125 .features[FEAT_8000_0001_ECX] = 2126 CPUID_EXT3_LAHF_LM, 2127 .xlevel = 0x80000008, 2128 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2129 }, 2130 { 2131 .name = "Conroe", 2132 .level = 10, 2133 .vendor = CPUID_VENDOR_INTEL, 2134 .family = 6, 2135 .model = 15, 2136 .stepping = 3, 2137 .features[FEAT_1_EDX] = 2138 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2142 CPUID_DE | CPUID_FP87, 2143 .features[FEAT_1_ECX] = 2144 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2145 .features[FEAT_8000_0001_EDX] = 2146 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2147 .features[FEAT_8000_0001_ECX] = 2148 CPUID_EXT3_LAHF_LM, 2149 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2150 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2151 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2152 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2153 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2154 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2155 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2156 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2157 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2158 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2159 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2160 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2161 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2162 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2163 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2164 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2165 .features[FEAT_VMX_SECONDARY_CTLS] = 2166 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2167 .xlevel = 0x80000008, 2168 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2169 }, 2170 { 2171 .name = "Penryn", 2172 .level = 10, 2173 .vendor = CPUID_VENDOR_INTEL, 2174 .family = 6, 2175 .model = 23, 2176 .stepping = 3, 2177 .features[FEAT_1_EDX] = 2178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2182 CPUID_DE | CPUID_FP87, 2183 .features[FEAT_1_ECX] = 2184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2185 CPUID_EXT_SSE3, 2186 .features[FEAT_8000_0001_EDX] = 2187 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2188 .features[FEAT_8000_0001_ECX] = 2189 CPUID_EXT3_LAHF_LM, 2190 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2191 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2192 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2193 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2194 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2195 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2196 
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2197 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2198 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2199 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2200 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2201 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2202 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2203 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2204 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2205 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2206 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2207 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2208 .features[FEAT_VMX_SECONDARY_CTLS] = 2209 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2210 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2211 .xlevel = 0x80000008, 2212 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2213 }, 2214 { 2215 .name = "Nehalem", 2216 .level = 11, 2217 .vendor = CPUID_VENDOR_INTEL, 2218 .family = 6, 2219 .model = 26, 2220 .stepping = 3, 2221 .features[FEAT_1_EDX] = 2222 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2223 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2224 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2225 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2226 CPUID_DE | CPUID_FP87, 2227 .features[FEAT_1_ECX] = 2228 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2229 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2230 .features[FEAT_8000_0001_EDX] = 2231 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2232 .features[FEAT_8000_0001_ECX] = 2233 CPUID_EXT3_LAHF_LM, 2234 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2235 MSR_VMX_BASIC_TRUE_CTLS, 2236 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2237 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2238 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2239 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2240 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2241 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2242 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2243 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2244 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2245 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2246 .features[FEAT_VMX_EXIT_CTLS] = 2247 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2248 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2249 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2250 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2251 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2252 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2253 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2254 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2255 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2256 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2257 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2258 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2259 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2260 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2261 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2262 VMX_CPU_BASED_UNCOND_IO_EXITING | 
VMX_CPU_BASED_USE_IO_BITMAPS | 2263 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2264 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2265 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2266 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2267 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2268 .features[FEAT_VMX_SECONDARY_CTLS] = 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2270 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2271 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2272 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2273 VMX_SECONDARY_EXEC_ENABLE_VPID, 2274 .xlevel = 0x80000008, 2275 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2276 .versions = (X86CPUVersionDefinition[]) { 2277 { .version = 1 }, 2278 { 2279 .version = 2, 2280 .alias = "Nehalem-IBRS", 2281 .props = (PropValue[]) { 2282 { "spec-ctrl", "on" }, 2283 { "model-id", 2284 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { /* end of list */ } 2289 } 2290 }, 2291 { 2292 .name = "Westmere", 2293 .level = 11, 2294 .vendor = CPUID_VENDOR_INTEL, 2295 .family = 6, 2296 .model = 44, 2297 .stepping = 1, 2298 .features[FEAT_1_EDX] = 2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2303 CPUID_DE | CPUID_FP87, 2304 .features[FEAT_1_ECX] = 2305 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2308 .features[FEAT_8000_0001_EDX] = 2309 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2310 .features[FEAT_8000_0001_ECX] = 2311 CPUID_EXT3_LAHF_LM, 2312 .features[FEAT_6_EAX] = 2313 CPUID_6_EAX_ARAT, 2314 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2315 MSR_VMX_BASIC_TRUE_CTLS, 2316 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2317 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2318 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2319 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2320 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2321 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2322 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2323 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2324 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2325 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2326 .features[FEAT_VMX_EXIT_CTLS] = 2327 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2328 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2329 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2330 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2331 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2332 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2333 MSR_VMX_MISC_STORE_LMA, 2334 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2335 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2336 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2337 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2338 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2339 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2340 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2341 
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2342 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2343 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2344 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2345 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2346 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2347 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2348 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2349 .features[FEAT_VMX_SECONDARY_CTLS] = 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2351 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2352 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2353 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2354 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2355 .xlevel = 0x80000008, 2356 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2357 .versions = (X86CPUVersionDefinition[]) { 2358 { .version = 1 }, 2359 { 2360 .version = 2, 2361 .alias = "Westmere-IBRS", 2362 .props = (PropValue[]) { 2363 { "spec-ctrl", "on" }, 2364 { "model-id", 2365 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { /* end of list */ } 2370 } 2371 }, 2372 { 2373 .name = "SandyBridge", 2374 .level = 0xd, 2375 .vendor = CPUID_VENDOR_INTEL, 2376 .family = 6, 2377 .model = 42, 2378 .stepping = 1, 2379 .features[FEAT_1_EDX] = 2380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2384 CPUID_DE | CPUID_FP87, 2385 .features[FEAT_1_ECX] = 2386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2387 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2388 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2389 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2390 CPUID_EXT_SSE3, 2391 .features[FEAT_8000_0001_EDX] = 2392 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2393 CPUID_EXT2_SYSCALL, 2394 .features[FEAT_8000_0001_ECX] = 2395 CPUID_EXT3_LAHF_LM, 2396 .features[FEAT_XSAVE] = 2397 CPUID_XSAVE_XSAVEOPT, 2398 .features[FEAT_6_EAX] = 2399 CPUID_6_EAX_ARAT, 2400 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2401 MSR_VMX_BASIC_TRUE_CTLS, 2402 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2403 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2404 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2405 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2406 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2407 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2408 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2409 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2410 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2411 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2412 .features[FEAT_VMX_EXIT_CTLS] = 2413 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2414 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2415 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2416 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2417 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2418 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2419 MSR_VMX_MISC_STORE_LMA, 2420 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2421 
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2422 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2423 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2424 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2425 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2426 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2427 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2428 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2429 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2430 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2431 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2432 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2433 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2434 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2435 .features[FEAT_VMX_SECONDARY_CTLS] = 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2437 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2438 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2439 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2440 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2441 .xlevel = 0x80000008, 2442 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2443 .versions = (X86CPUVersionDefinition[]) { 2444 { .version = 1 }, 2445 { 2446 .version = 2, 2447 .alias = "SandyBridge-IBRS", 2448 .props = (PropValue[]) { 2449 { "spec-ctrl", "on" }, 2450 { "model-id", 2451 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { /* end of list */ } 2456 } 2457 }, 2458 { 2459 .name = "IvyBridge", 2460 .level = 0xd, 2461 .vendor = CPUID_VENDOR_INTEL, 2462 .family = 6, 2463 .model = 58, 2464 .stepping = 9, 2465 .features[FEAT_1_EDX] = 2466 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2467 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2468 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2469 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2470 CPUID_DE | CPUID_FP87, 2471 .features[FEAT_1_ECX] = 2472 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2473 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2474 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2475 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2476 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2477 .features[FEAT_7_0_EBX] = 2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2479 CPUID_7_0_EBX_ERMS, 2480 .features[FEAT_8000_0001_EDX] = 2481 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2482 CPUID_EXT2_SYSCALL, 2483 .features[FEAT_8000_0001_ECX] = 2484 CPUID_EXT3_LAHF_LM, 2485 .features[FEAT_XSAVE] = 2486 CPUID_XSAVE_XSAVEOPT, 2487 .features[FEAT_6_EAX] = 2488 CPUID_6_EAX_ARAT, 2489 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2490 MSR_VMX_BASIC_TRUE_CTLS, 2491 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2492 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2493 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2494 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2495 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2496 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2497 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2498 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2499 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2500 
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2501 .features[FEAT_VMX_EXIT_CTLS] = 2502 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2503 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2504 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2505 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2506 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2507 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2508 MSR_VMX_MISC_STORE_LMA, 2509 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2510 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2511 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2512 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2513 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2514 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2515 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2516 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2517 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2518 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2519 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2520 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2521 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2522 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2523 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2524 .features[FEAT_VMX_SECONDARY_CTLS] = 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2526 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2527 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2528 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2529 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2530 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2531 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2532 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2533 .xlevel = 0x80000008, 2534 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2535 .versions = (X86CPUVersionDefinition[]) { 2536 { .version = 1 }, 2537 { 2538 .version = 2, 2539 .alias = "IvyBridge-IBRS", 2540 .props = (PropValue[]) { 2541 { "spec-ctrl", "on" }, 2542 { "model-id", 2543 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { /* end of list */ } 2548 } 2549 }, 2550 { 2551 .name = "Haswell", 2552 .level = 0xd, 2553 .vendor = CPUID_VENDOR_INTEL, 2554 .family = 6, 2555 .model = 60, 2556 .stepping = 4, 2557 .features[FEAT_1_EDX] = 2558 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2559 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2560 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2561 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2562 CPUID_DE | CPUID_FP87, 2563 .features[FEAT_1_ECX] = 2564 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2565 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2566 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2567 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2568 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2569 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2570 .features[FEAT_8000_0001_EDX] = 2571 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2572 CPUID_EXT2_SYSCALL, 2573 .features[FEAT_8000_0001_ECX] = 2574 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2575 .features[FEAT_7_0_EBX] = 2576 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2577 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2578 
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2579 CPUID_7_0_EBX_RTM, 2580 .features[FEAT_XSAVE] = 2581 CPUID_XSAVE_XSAVEOPT, 2582 .features[FEAT_6_EAX] = 2583 CPUID_6_EAX_ARAT, 2584 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2585 MSR_VMX_BASIC_TRUE_CTLS, 2586 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2587 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2588 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2589 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2590 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2591 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2592 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2593 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2594 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2595 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2596 .features[FEAT_VMX_EXIT_CTLS] = 2597 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2598 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2599 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2600 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2601 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2602 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2603 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2604 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2605 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2606 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2607 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2608 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2609 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2610 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2611 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2612 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2613 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2614 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2615 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2616 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2617 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2618 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2619 .features[FEAT_VMX_SECONDARY_CTLS] = 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2621 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2622 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2623 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2624 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2625 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2626 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2627 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2628 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2629 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2630 .xlevel = 0x80000008, 2631 .model_id = "Intel Core Processor (Haswell)", 2632 .versions = (X86CPUVersionDefinition[]) { 2633 { .version = 1 }, 2634 { 2635 .version = 2, 2636 .alias = "Haswell-noTSX", 2637 .props = (PropValue[]) { 2638 { "hle", "off" }, 2639 { "rtm", "off" }, 2640 { "stepping", "1" }, 2641 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2642 { /* end of list */ } 2643 }, 2644 }, 2645 { 2646 .version = 3, 2647 .alias = "Haswell-IBRS", 2648 .props = (PropValue[]) { 2649 /* 
Restore TSX features removed by -v2 above */ 2650 { "hle", "on" }, 2651 { "rtm", "on" }, 2652 /* 2653 * Haswell and Haswell-IBRS had stepping=4 in 2654 * QEMU 4.0 and older 2655 */ 2656 { "stepping", "4" }, 2657 { "spec-ctrl", "on" }, 2658 { "model-id", 2659 "Intel Core Processor (Haswell, IBRS)" }, 2660 { /* end of list */ } 2661 } 2662 }, 2663 { 2664 .version = 4, 2665 .alias = "Haswell-noTSX-IBRS", 2666 .props = (PropValue[]) { 2667 { "hle", "off" }, 2668 { "rtm", "off" }, 2669 /* spec-ctrl was already enabled by -v3 above */ 2670 { "stepping", "1" }, 2671 { "model-id", 2672 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { /* end of list */ } 2677 } 2678 }, 2679 { 2680 .name = "Broadwell", 2681 .level = 0xd, 2682 .vendor = CPUID_VENDOR_INTEL, 2683 .family = 6, 2684 .model = 61, 2685 .stepping = 2, 2686 .features[FEAT_1_EDX] = 2687 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2688 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2689 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2690 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2691 CPUID_DE | CPUID_FP87, 2692 .features[FEAT_1_ECX] = 2693 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2694 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2695 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2696 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2697 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2698 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2699 .features[FEAT_8000_0001_EDX] = 2700 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2701 CPUID_EXT2_SYSCALL, 2702 .features[FEAT_8000_0001_ECX] = 2703 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2704 .features[FEAT_7_0_EBX] = 2705 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2706 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2707 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2708 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2709 CPUID_7_0_EBX_SMAP, 2710 .features[FEAT_XSAVE] = 2711 CPUID_XSAVE_XSAVEOPT, 2712 .features[FEAT_6_EAX] = 2713 CPUID_6_EAX_ARAT, 2714 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2715 MSR_VMX_BASIC_TRUE_CTLS, 2716 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2717 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2718 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2719 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2720 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2721 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2722 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2723 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2724 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2725 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2726 .features[FEAT_VMX_EXIT_CTLS] = 2727 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2728 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2729 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2730 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2731 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2732 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2733 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2734 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2735 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2736 
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2737 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2738 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2739 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2740 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2741 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2742 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2743 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2744 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2745 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2746 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2747 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2748 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2749 .features[FEAT_VMX_SECONDARY_CTLS] = 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2751 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2752 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2753 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2754 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2755 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2756 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2757 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2758 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2759 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2760 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2761 .xlevel = 0x80000008, 2762 .model_id = "Intel Core Processor (Broadwell)", 2763 .versions = (X86CPUVersionDefinition[]) { 2764 { .version = 1 }, 2765 { 2766 .version = 2, 2767 .alias = "Broadwell-noTSX", 2768 .props = (PropValue[]) { 2769 { "hle", "off" }, 2770 { "rtm", "off" }, 2771 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2772 { /* end of list */ } 2773 }, 2774 }, 2775 { 2776 .version = 3, 2777 .alias = "Broadwell-IBRS", 2778 .props = (PropValue[]) { 2779 /* Restore TSX features removed by -v2 above */ 2780 { "hle", "on" }, 2781 { "rtm", "on" }, 2782 { "spec-ctrl", "on" }, 2783 { "model-id", 2784 "Intel Core Processor (Broadwell, IBRS)" }, 2785 { /* end of list */ } 2786 } 2787 }, 2788 { 2789 .version = 4, 2790 .alias = "Broadwell-noTSX-IBRS", 2791 .props = (PropValue[]) { 2792 { "hle", "off" }, 2793 { "rtm", "off" }, 2794 /* spec-ctrl was already enabled by -v3 above */ 2795 { "model-id", 2796 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { /* end of list */ } 2801 } 2802 }, 2803 { 2804 .name = "Skylake-Client", 2805 .level = 0xd, 2806 .vendor = CPUID_VENDOR_INTEL, 2807 .family = 6, 2808 .model = 94, 2809 .stepping = 3, 2810 .features[FEAT_1_EDX] = 2811 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2812 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2813 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2814 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2815 CPUID_DE | CPUID_FP87, 2816 .features[FEAT_1_ECX] = 2817 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2818 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2819 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2820 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2821 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2822 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2823 .features[FEAT_8000_0001_EDX] = 2824 CPUID_EXT2_LM | 
CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2825 CPUID_EXT2_SYSCALL, 2826 .features[FEAT_8000_0001_ECX] = 2827 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2828 .features[FEAT_7_0_EBX] = 2829 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2830 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2831 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2832 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2833 CPUID_7_0_EBX_SMAP, 2834 /* Missing: XSAVES (not supported by some Linux versions, 2835 * including v4.1 to v4.12). 2836 * KVM doesn't yet expose any XSAVES state save component, 2837 * and the only one defined in Skylake (processor tracing) 2838 * probably will block migration anyway. 2839 */ 2840 .features[FEAT_XSAVE] = 2841 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2842 CPUID_XSAVE_XGETBV1, 2843 .features[FEAT_6_EAX] = 2844 CPUID_6_EAX_ARAT, 2845 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2846 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2847 MSR_VMX_BASIC_TRUE_CTLS, 2848 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2849 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2850 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2851 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2852 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2853 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2854 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2855 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2856 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2857 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2858 .features[FEAT_VMX_EXIT_CTLS] = 2859 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2860 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2861 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2862 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2863 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2864 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2865 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2866 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2867 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2868 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2869 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2870 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2871 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2872 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2873 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2874 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2875 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2876 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2877 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2878 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2879 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2880 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2881 .features[FEAT_VMX_SECONDARY_CTLS] = 2882 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2883 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2884 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2885 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2886 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2887 
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2888 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2889 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2890 .xlevel = 0x80000008, 2891 .model_id = "Intel Core Processor (Skylake)", 2892 .versions = (X86CPUVersionDefinition[]) { 2893 { .version = 1 }, 2894 { 2895 .version = 2, 2896 .alias = "Skylake-Client-IBRS", 2897 .props = (PropValue[]) { 2898 { "spec-ctrl", "on" }, 2899 { "model-id", 2900 "Intel Core Processor (Skylake, IBRS)" }, 2901 { /* end of list */ } 2902 } 2903 }, 2904 { 2905 .version = 3, 2906 .alias = "Skylake-Client-noTSX-IBRS", 2907 .props = (PropValue[]) { 2908 { "hle", "off" }, 2909 { "rtm", "off" }, 2910 { "model-id", 2911 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { /* end of list */ } 2916 } 2917 }, 2918 { 2919 .name = "Skylake-Server", 2920 .level = 0xd, 2921 .vendor = CPUID_VENDOR_INTEL, 2922 .family = 6, 2923 .model = 85, 2924 .stepping = 4, 2925 .features[FEAT_1_EDX] = 2926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2930 CPUID_DE | CPUID_FP87, 2931 .features[FEAT_1_ECX] = 2932 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2933 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2934 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2935 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2936 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2937 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2938 .features[FEAT_8000_0001_EDX] = 2939 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2940 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2941 .features[FEAT_8000_0001_ECX] = 2942 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2943 .features[FEAT_7_0_EBX] = 2944 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2945 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2946 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2947 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2948 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2949 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2950 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2951 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2952 .features[FEAT_7_0_ECX] = 2953 CPUID_7_0_ECX_PKU, 2954 /* Missing: XSAVES (not supported by some Linux versions, 2955 * including v4.1 to v4.12). 2956 * KVM doesn't yet expose any XSAVES state save component, 2957 * and the only one defined in Skylake (processor tracing) 2958 * probably will block migration anyway. 
2959 */ 2960 .features[FEAT_XSAVE] = 2961 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2962 CPUID_XSAVE_XGETBV1, 2963 .features[FEAT_6_EAX] = 2964 CPUID_6_EAX_ARAT, 2965 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2966 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2967 MSR_VMX_BASIC_TRUE_CTLS, 2968 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2969 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2970 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2971 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2972 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2973 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2974 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2975 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2976 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2977 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2978 .features[FEAT_VMX_EXIT_CTLS] = 2979 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2980 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2981 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2982 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2983 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2984 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2985 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2986 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2987 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2988 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2989 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2990 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2991 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2992 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2993 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2994 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2995 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2996 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2997 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2998 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2999 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3000 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3001 .features[FEAT_VMX_SECONDARY_CTLS] = 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3003 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3004 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3005 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3006 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3007 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3008 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3009 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3010 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3011 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3012 .xlevel = 0x80000008, 3013 .model_id = "Intel Xeon Processor (Skylake)", 3014 .versions = (X86CPUVersionDefinition[]) { 3015 { .version = 1 }, 3016 { 3017 .version = 2, 3018 .alias = "Skylake-Server-IBRS", 3019 .props = (PropValue[]) { 3020 /* clflushopt was not added to Skylake-Server-IBRS */ 3021 /* TODO: add -v3 including clflushopt */ 3022 { "clflushopt", "off" }, 3023 { "spec-ctrl", "on" }, 3024 { "model-id", 3025 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3026 { /* end of list */ } 3027 } 3028 }, 3029 { 3030 .version = 3, 3031 .alias = "Skylake-Server-noTSX-IBRS", 3032 .props = (PropValue[]) { 3033 { "hle", "off" }, 3034 { "rtm", "off" }, 3035 { "model-id", 3036 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .version = 4, 3042 .props = (PropValue[]) { 3043 { "vmx-eptp-switching", "on" }, 3044 { /* end of list */ } 3045 } 3046 }, 3047 { /* end of list */ } 3048 } 3049 }, 3050 { 3051 .name = "Cascadelake-Server", 3052 .level = 0xd, 3053 .vendor = CPUID_VENDOR_INTEL, 3054 .family = 6, 3055 .model = 85, 3056 .stepping = 6, 3057 .features[FEAT_1_EDX] = 3058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3062 CPUID_DE | CPUID_FP87, 3063 .features[FEAT_1_ECX] = 3064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3065 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3066 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3067 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3068 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3069 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3070 .features[FEAT_8000_0001_EDX] = 3071 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3072 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3073 .features[FEAT_8000_0001_ECX] = 3074 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3075 .features[FEAT_7_0_EBX] = 3076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3077 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3078 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3079 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3080 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3081 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3082 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3083 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3084 .features[FEAT_7_0_ECX] = 3085 CPUID_7_0_ECX_PKU | 3086 CPUID_7_0_ECX_AVX512VNNI, 3087 .features[FEAT_7_0_EDX] = 3088 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3089 /* Missing: XSAVES (not supported by some Linux versions, 3090 * including v4.1 to v4.12). 3091 * KVM doesn't yet expose any XSAVES state save component, 3092 * and the only one defined in Skylake (processor tracing) 3093 * probably will block migration anyway. 
3094 */ 3095 .features[FEAT_XSAVE] = 3096 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3097 CPUID_XSAVE_XGETBV1, 3098 .features[FEAT_6_EAX] = 3099 CPUID_6_EAX_ARAT, 3100 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3101 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3102 MSR_VMX_BASIC_TRUE_CTLS, 3103 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3104 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3105 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3106 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3107 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3108 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3109 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3110 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3111 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3112 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3113 .features[FEAT_VMX_EXIT_CTLS] = 3114 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3115 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3116 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3117 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3118 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3119 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3120 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3121 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3122 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3123 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3124 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3125 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3126 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3127 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3128 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3129 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3130 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3131 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3132 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3133 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3134 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3135 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3136 .features[FEAT_VMX_SECONDARY_CTLS] = 3137 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3138 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3139 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3140 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3141 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3142 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3143 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3144 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3145 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3146 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3147 .xlevel = 0x80000008, 3148 .model_id = "Intel Xeon Processor (Cascadelake)", 3149 .versions = (X86CPUVersionDefinition[]) { 3150 { .version = 1 }, 3151 { .version = 2, 3152 .note = "ARCH_CAPABILITIES", 3153 .props = (PropValue[]) { 3154 { "arch-capabilities", "on" }, 3155 { "rdctl-no", "on" }, 3156 { "ibrs-all", "on" }, 3157 { "skip-l1dfl-vmentry", "on" }, 3158 { "mds-no", "on" }, 3159 { /* end of list */ } 3160 }, 3161 }, 3162 { .version = 
3, 3163 .alias = "Cascadelake-Server-noTSX", 3164 .note = "ARCH_CAPABILITIES, no TSX", 3165 .props = (PropValue[]) { 3166 { "hle", "off" }, 3167 { "rtm", "off" }, 3168 { /* end of list */ } 3169 }, 3170 }, 3171 { .version = 4, 3172 .note = "ARCH_CAPABILITIES, no TSX", 3173 .props = (PropValue[]) { 3174 { "vmx-eptp-switching", "on" }, 3175 { /* end of list */ } 3176 }, 3177 }, 3178 { /* end of list */ } 3179 } 3180 }, 3181 { 3182 .name = "Cooperlake", 3183 .level = 0xd, 3184 .vendor = CPUID_VENDOR_INTEL, 3185 .family = 6, 3186 .model = 85, 3187 .stepping = 10, 3188 .features[FEAT_1_EDX] = 3189 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3190 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3191 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3192 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3193 CPUID_DE | CPUID_FP87, 3194 .features[FEAT_1_ECX] = 3195 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3196 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3197 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3198 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3199 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3200 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3201 .features[FEAT_8000_0001_EDX] = 3202 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3203 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3204 .features[FEAT_8000_0001_ECX] = 3205 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3206 .features[FEAT_7_0_EBX] = 3207 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3208 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3209 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3210 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3211 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3212 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3213 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3214 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3215 .features[FEAT_7_0_ECX] = 3216 CPUID_7_0_ECX_PKU | 3217 CPUID_7_0_ECX_AVX512VNNI, 3218 .features[FEAT_7_0_EDX] = 3219 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3220 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3221 .features[FEAT_ARCH_CAPABILITIES] = 3222 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3223 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3224 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3225 .features[FEAT_7_1_EAX] = 3226 CPUID_7_1_EAX_AVX512_BF16, 3227 /* 3228 * Missing: XSAVES (not supported by some Linux versions, 3229 * including v4.1 to v4.12). 3230 * KVM doesn't yet expose any XSAVES state save component, 3231 * and the only one defined in Skylake (processor tracing) 3232 * probably will block migration anyway. 
3233 */ 3234 .features[FEAT_XSAVE] = 3235 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3236 CPUID_XSAVE_XGETBV1, 3237 .features[FEAT_6_EAX] = 3238 CPUID_6_EAX_ARAT, 3239 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3240 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3241 MSR_VMX_BASIC_TRUE_CTLS, 3242 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3243 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3244 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3245 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3246 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3247 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3248 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3249 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3250 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3251 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3252 .features[FEAT_VMX_EXIT_CTLS] = 3253 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3254 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3255 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3256 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3257 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3258 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3259 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3260 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3261 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3262 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3263 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3264 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3265 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3266 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3267 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3268 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3269 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3270 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3271 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3272 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3273 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3274 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3275 .features[FEAT_VMX_SECONDARY_CTLS] = 3276 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3277 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3278 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3279 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3280 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3281 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3282 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3283 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3284 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3285 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3286 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3287 .xlevel = 0x80000008, 3288 .model_id = "Intel Xeon Processor (Cooperlake)", 3289 }, 3290 { 3291 .name = "Icelake-Client", 3292 .level = 0xd, 3293 .vendor = CPUID_VENDOR_INTEL, 3294 .family = 6, 3295 .model = 126, 3296 .stepping = 0, 3297 .features[FEAT_1_EDX] = 3298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
CPUID_CMOV | CPUID_MCA | 3300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3302 CPUID_DE | CPUID_FP87, 3303 .features[FEAT_1_ECX] = 3304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3310 .features[FEAT_8000_0001_EDX] = 3311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3312 CPUID_EXT2_SYSCALL, 3313 .features[FEAT_8000_0001_ECX] = 3314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3315 .features[FEAT_8000_0008_EBX] = 3316 CPUID_8000_0008_EBX_WBNOINVD, 3317 .features[FEAT_7_0_EBX] = 3318 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3319 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3320 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3321 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3322 CPUID_7_0_EBX_SMAP, 3323 .features[FEAT_7_0_ECX] = 3324 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3325 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3326 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3327 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3328 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3329 .features[FEAT_7_0_EDX] = 3330 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3331 /* Missing: XSAVES (not supported by some Linux versions, 3332 * including v4.1 to v4.12). 3333 * KVM doesn't yet expose any XSAVES state save component, 3334 * and the only one defined in Skylake (processor tracing) 3335 * probably will block migration anyway. 
3336 */ 3337 .features[FEAT_XSAVE] = 3338 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3339 CPUID_XSAVE_XGETBV1, 3340 .features[FEAT_6_EAX] = 3341 CPUID_6_EAX_ARAT, 3342 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3343 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3344 MSR_VMX_BASIC_TRUE_CTLS, 3345 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3346 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3347 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3348 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3349 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3350 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3351 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3352 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3353 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3354 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3355 .features[FEAT_VMX_EXIT_CTLS] = 3356 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3357 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3358 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3359 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3360 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3361 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3362 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3363 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3364 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3365 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3366 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3367 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3368 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3369 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3370 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3371 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3372 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3373 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3374 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3375 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3376 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3377 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3378 .features[FEAT_VMX_SECONDARY_CTLS] = 3379 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3380 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3381 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3382 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3383 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3384 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3385 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3386 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3387 .xlevel = 0x80000008, 3388 .model_id = "Intel Core Processor (Icelake)", 3389 .versions = (X86CPUVersionDefinition[]) { 3390 { 3391 .version = 1, 3392 .note = "deprecated" 3393 }, 3394 { 3395 .version = 2, 3396 .note = "no TSX, deprecated", 3397 .alias = "Icelake-Client-noTSX", 3398 .props = (PropValue[]) { 3399 { "hle", "off" }, 3400 { "rtm", "off" }, 3401 { /* end of list */ } 3402 }, 3403 }, 3404 { /* end of list */ } 3405 }, 3406 .deprecation_note = "use Icelake-Server instead" 3407 }, 3408 { 3409 .name = "Icelake-Server", 3410 .level = 0xd, 
3411 .vendor = CPUID_VENDOR_INTEL, 3412 .family = 6, 3413 .model = 134, 3414 .stepping = 0, 3415 .features[FEAT_1_EDX] = 3416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3420 CPUID_DE | CPUID_FP87, 3421 .features[FEAT_1_ECX] = 3422 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3423 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3425 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3426 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3427 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3428 .features[FEAT_8000_0001_EDX] = 3429 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3430 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3431 .features[FEAT_8000_0001_ECX] = 3432 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3433 .features[FEAT_8000_0008_EBX] = 3434 CPUID_8000_0008_EBX_WBNOINVD, 3435 .features[FEAT_7_0_EBX] = 3436 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3437 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3438 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3439 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3440 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3441 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3442 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3443 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3444 .features[FEAT_7_0_ECX] = 3445 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3446 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3447 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3448 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3449 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3450 .features[FEAT_7_0_EDX] = 3451 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3452 /* Missing: XSAVES (not supported by some Linux versions, 3453 * including v4.1 to v4.12). 3454 * KVM doesn't yet expose any XSAVES state save component, 3455 * and the only one defined in Skylake (processor tracing) 3456 * probably will block migration anyway. 
3457 */ 3458 .features[FEAT_XSAVE] = 3459 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3460 CPUID_XSAVE_XGETBV1, 3461 .features[FEAT_6_EAX] = 3462 CPUID_6_EAX_ARAT, 3463 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3464 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3465 MSR_VMX_BASIC_TRUE_CTLS, 3466 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3467 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3468 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3469 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3470 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3471 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3472 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3473 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3474 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3475 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3476 .features[FEAT_VMX_EXIT_CTLS] = 3477 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3478 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3479 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3480 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3481 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3482 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3483 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3484 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3485 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3486 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3487 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3488 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3489 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3490 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3491 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3492 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3493 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3494 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3495 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3496 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3497 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3498 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3499 .features[FEAT_VMX_SECONDARY_CTLS] = 3500 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3501 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3502 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3503 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3504 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3505 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3506 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3507 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3508 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3509 .xlevel = 0x80000008, 3510 .model_id = "Intel Xeon Processor (Icelake)", 3511 .versions = (X86CPUVersionDefinition[]) { 3512 { .version = 1 }, 3513 { 3514 .version = 2, 3515 .note = "no TSX", 3516 .alias = "Icelake-Server-noTSX", 3517 .props = (PropValue[]) { 3518 { "hle", "off" }, 3519 { "rtm", "off" }, 3520 { /* end of list */ } 3521 }, 3522 }, 3523 { 3524 .version = 3, 3525 .props = (PropValue[]) { 3526 { "arch-capabilities", "on" }, 3527 { "rdctl-no", "on" }, 3528 { "ibrs-all", "on" }, 3529 { 
"skip-l1dfl-vmentry", "on" }, 3530 { "mds-no", "on" }, 3531 { "pschange-mc-no", "on" }, 3532 { "taa-no", "on" }, 3533 { /* end of list */ } 3534 }, 3535 }, 3536 { 3537 .version = 4, 3538 .props = (PropValue[]) { 3539 { "sha-ni", "on" }, 3540 { "avx512ifma", "on" }, 3541 { "rdpid", "on" }, 3542 { "fsrm", "on" }, 3543 { "vmx-rdseed-exit", "on" }, 3544 { "vmx-pml", "on" }, 3545 { "vmx-eptp-switching", "on" }, 3546 { "model", "106" }, 3547 { /* end of list */ } 3548 }, 3549 }, 3550 { /* end of list */ } 3551 } 3552 }, 3553 { 3554 .name = "Denverton", 3555 .level = 21, 3556 .vendor = CPUID_VENDOR_INTEL, 3557 .family = 6, 3558 .model = 95, 3559 .stepping = 1, 3560 .features[FEAT_1_EDX] = 3561 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3562 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3563 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3564 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3565 CPUID_SSE | CPUID_SSE2, 3566 .features[FEAT_1_ECX] = 3567 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3568 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3569 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3570 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3571 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3572 .features[FEAT_8000_0001_EDX] = 3573 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3574 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3575 .features[FEAT_8000_0001_ECX] = 3576 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3577 .features[FEAT_7_0_EBX] = 3578 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3579 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3580 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3581 .features[FEAT_7_0_EDX] = 3582 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3583 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3584 /* 3585 * Missing: XSAVES (not supported by some Linux versions, 3586 * including v4.1 to v4.12). 3587 * KVM doesn't yet expose any XSAVES state save component, 3588 * and the only one defined in Skylake (processor tracing) 3589 * probably will block migration anyway. 
3590 */ 3591 .features[FEAT_XSAVE] = 3592 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3593 .features[FEAT_6_EAX] = 3594 CPUID_6_EAX_ARAT, 3595 .features[FEAT_ARCH_CAPABILITIES] = 3596 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3597 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3598 MSR_VMX_BASIC_TRUE_CTLS, 3599 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3600 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3601 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3602 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3603 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3604 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3605 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3606 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3607 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3608 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3609 .features[FEAT_VMX_EXIT_CTLS] = 3610 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3611 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3612 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3613 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3614 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3615 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3616 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3617 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3618 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3619 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3620 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3621 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3622 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3623 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3624 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3625 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3626 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3627 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3628 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3629 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3630 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3631 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3632 .features[FEAT_VMX_SECONDARY_CTLS] = 3633 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3634 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3635 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3636 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3637 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3638 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3639 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3640 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3641 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3642 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3643 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3644 .xlevel = 0x80000008, 3645 .model_id = "Intel Atom Processor (Denverton)", 3646 .versions = (X86CPUVersionDefinition[]) { 3647 { .version = 1 }, 3648 { 3649 .version = 2, 3650 .note = "no MPX, no MONITOR", 3651 .props = (PropValue[]) { 3652 { "monitor", "off" }, 3653 { "mpx", "off" }, 3654 { /* end of list */ }, 3655 }, 3656 }, 3657 { /* end of list */ }, 3658 }, 3659 
}, 3660 { 3661 .name = "Snowridge", 3662 .level = 27, 3663 .vendor = CPUID_VENDOR_INTEL, 3664 .family = 6, 3665 .model = 134, 3666 .stepping = 1, 3667 .features[FEAT_1_EDX] = 3668 /* missing: CPUID_PN CPUID_IA64 */ 3669 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3670 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3671 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3672 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3673 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3674 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3675 CPUID_MMX | 3676 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3677 .features[FEAT_1_ECX] = 3678 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3679 CPUID_EXT_SSSE3 | 3680 CPUID_EXT_CX16 | 3681 CPUID_EXT_SSE41 | 3682 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3683 CPUID_EXT_POPCNT | 3684 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3685 CPUID_EXT_RDRAND, 3686 .features[FEAT_8000_0001_EDX] = 3687 CPUID_EXT2_SYSCALL | 3688 CPUID_EXT2_NX | 3689 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3690 CPUID_EXT2_LM, 3691 .features[FEAT_8000_0001_ECX] = 3692 CPUID_EXT3_LAHF_LM | 3693 CPUID_EXT3_3DNOWPREFETCH, 3694 .features[FEAT_7_0_EBX] = 3695 CPUID_7_0_EBX_FSGSBASE | 3696 CPUID_7_0_EBX_SMEP | 3697 CPUID_7_0_EBX_ERMS | 3698 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3699 CPUID_7_0_EBX_RDSEED | 3700 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3701 CPUID_7_0_EBX_CLWB | 3702 CPUID_7_0_EBX_SHA_NI, 3703 .features[FEAT_7_0_ECX] = 3704 CPUID_7_0_ECX_UMIP | 3705 /* missing bit 5 */ 3706 CPUID_7_0_ECX_GFNI | 3707 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3708 CPUID_7_0_ECX_MOVDIR64B, 3709 .features[FEAT_7_0_EDX] = 3710 CPUID_7_0_EDX_SPEC_CTRL | 3711 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3712 CPUID_7_0_EDX_CORE_CAPABILITY, 3713 .features[FEAT_CORE_CAPABILITY] = 3714 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3715 /* 3716 * Missing: XSAVES (not supported by some Linux versions, 3717 * including v4.1 to v4.12). 3718 * KVM doesn't yet expose any XSAVES state save component, 3719 * and the only one defined in Skylake (processor tracing) 3720 * probably will block migration anyway. 
3721 */ 3722 .features[FEAT_XSAVE] = 3723 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3724 CPUID_XSAVE_XGETBV1, 3725 .features[FEAT_6_EAX] = 3726 CPUID_6_EAX_ARAT, 3727 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3728 MSR_VMX_BASIC_TRUE_CTLS, 3729 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3730 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3731 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3732 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3733 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3734 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3735 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3736 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3737 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3738 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3739 .features[FEAT_VMX_EXIT_CTLS] = 3740 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3741 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3742 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3743 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3744 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3745 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3746 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3747 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3748 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3749 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3750 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3751 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3752 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3753 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3754 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3755 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3756 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3757 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3758 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3759 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3760 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3761 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3762 .features[FEAT_VMX_SECONDARY_CTLS] = 3763 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3764 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3765 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3766 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3767 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3768 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3769 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3770 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3771 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3772 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3773 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3774 .xlevel = 0x80000008, 3775 .model_id = "Intel Atom Processor (SnowRidge)", 3776 .versions = (X86CPUVersionDefinition[]) { 3777 { .version = 1 }, 3778 { 3779 .version = 2, 3780 .props = (PropValue[]) { 3781 { "mpx", "off" }, 3782 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3783 { /* end of list */ }, 3784 }, 3785 }, 3786 { /* end of list */ }, 3787 }, 3788 }, 3789 { 3790 .name = "KnightsMill", 3791 .level = 0xd, 3792 .vendor = CPUID_VENDOR_INTEL, 
3793 .family = 6, 3794 .model = 133, 3795 .stepping = 0, 3796 .features[FEAT_1_EDX] = 3797 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3798 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3799 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3800 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3801 CPUID_PSE | CPUID_DE | CPUID_FP87, 3802 .features[FEAT_1_ECX] = 3803 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3804 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3805 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3806 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3807 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3808 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3809 .features[FEAT_8000_0001_EDX] = 3810 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3811 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3812 .features[FEAT_8000_0001_ECX] = 3813 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3814 .features[FEAT_7_0_EBX] = 3815 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3816 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3817 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3818 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3819 CPUID_7_0_EBX_AVX512ER, 3820 .features[FEAT_7_0_ECX] = 3821 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3822 .features[FEAT_7_0_EDX] = 3823 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3824 .features[FEAT_XSAVE] = 3825 CPUID_XSAVE_XSAVEOPT, 3826 .features[FEAT_6_EAX] = 3827 CPUID_6_EAX_ARAT, 3828 .xlevel = 0x80000008, 3829 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3830 }, 3831 { 3832 .name = "Opteron_G1", 3833 .level = 5, 3834 .vendor = CPUID_VENDOR_AMD, 3835 .family = 15, 3836 .model = 6, 3837 .stepping = 1, 3838 .features[FEAT_1_EDX] = 3839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3843 CPUID_DE | CPUID_FP87, 3844 .features[FEAT_1_ECX] = 3845 CPUID_EXT_SSE3, 3846 .features[FEAT_8000_0001_EDX] = 3847 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3848 .xlevel = 0x80000008, 3849 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3850 }, 3851 { 3852 .name = "Opteron_G2", 3853 .level = 5, 3854 .vendor = CPUID_VENDOR_AMD, 3855 .family = 15, 3856 .model = 6, 3857 .stepping = 1, 3858 .features[FEAT_1_EDX] = 3859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3863 CPUID_DE | CPUID_FP87, 3864 .features[FEAT_1_ECX] = 3865 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3866 .features[FEAT_8000_0001_EDX] = 3867 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3868 .features[FEAT_8000_0001_ECX] = 3869 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3870 .xlevel = 0x80000008, 3871 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3872 }, 3873 { 3874 .name = "Opteron_G3", 3875 .level = 5, 3876 .vendor = CPUID_VENDOR_AMD, 3877 .family = 16, 3878 .model = 2, 3879 .stepping = 3, 3880 .features[FEAT_1_EDX] = 3881 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3882 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3883 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3884 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3885 CPUID_DE | CPUID_FP87, 3886 .features[FEAT_1_ECX] = 3887 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3888 CPUID_EXT_SSE3, 3889 .features[FEAT_8000_0001_EDX] = 3890 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3891 CPUID_EXT2_RDTSCP, 3892 .features[FEAT_8000_0001_ECX] = 3893 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3894 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3895 .xlevel = 0x80000008, 3896 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3897 }, 3898 { 3899 .name = "Opteron_G4", 3900 .level = 0xd, 3901 .vendor = CPUID_VENDOR_AMD, 3902 .family = 21, 3903 .model = 1, 3904 .stepping = 2, 3905 .features[FEAT_1_EDX] = 3906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3910 CPUID_DE | CPUID_FP87, 3911 .features[FEAT_1_ECX] = 3912 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3913 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3914 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3915 CPUID_EXT_SSE3, 3916 .features[FEAT_8000_0001_EDX] = 3917 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3918 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3919 .features[FEAT_8000_0001_ECX] = 3920 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3921 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3922 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3923 CPUID_EXT3_LAHF_LM, 3924 .features[FEAT_SVM] = 3925 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3926 /* no xsaveopt! */ 3927 .xlevel = 0x8000001A, 3928 .model_id = "AMD Opteron 62xx class CPU", 3929 }, 3930 { 3931 .name = "Opteron_G5", 3932 .level = 0xd, 3933 .vendor = CPUID_VENDOR_AMD, 3934 .family = 21, 3935 .model = 2, 3936 .stepping = 0, 3937 .features[FEAT_1_EDX] = 3938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3942 CPUID_DE | CPUID_FP87, 3943 .features[FEAT_1_ECX] = 3944 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3945 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3946 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3947 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3948 .features[FEAT_8000_0001_EDX] = 3949 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3950 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3951 .features[FEAT_8000_0001_ECX] = 3952 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3953 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3954 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3955 CPUID_EXT3_LAHF_LM, 3956 .features[FEAT_SVM] = 3957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3958 /* no xsaveopt! 
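 *
 * XSAVE itself is still advertised via CPUID_EXT_XSAVE in FEAT_1_ECX
 * above; only the CPUID[0xD, 1] bits (FEAT_XSAVE: XSAVEOPT and friends)
 * are deliberately left clear for these Opteron models.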
*/ 3959 .xlevel = 0x8000001A, 3960 .model_id = "AMD Opteron 63xx class CPU", 3961 }, 3962 { 3963 .name = "EPYC", 3964 .level = 0xd, 3965 .vendor = CPUID_VENDOR_AMD, 3966 .family = 23, 3967 .model = 1, 3968 .stepping = 2, 3969 .features[FEAT_1_EDX] = 3970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3974 CPUID_VME | CPUID_FP87, 3975 .features[FEAT_1_ECX] = 3976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3977 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3980 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3981 .features[FEAT_8000_0001_EDX] = 3982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3984 CPUID_EXT2_SYSCALL, 3985 .features[FEAT_8000_0001_ECX] = 3986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3989 CPUID_EXT3_TOPOEXT, 3990 .features[FEAT_7_0_EBX] = 3991 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3992 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3993 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3994 CPUID_7_0_EBX_SHA_NI, 3995 .features[FEAT_XSAVE] = 3996 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3997 CPUID_XSAVE_XGETBV1, 3998 .features[FEAT_6_EAX] = 3999 CPUID_6_EAX_ARAT, 4000 .features[FEAT_SVM] = 4001 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4002 .xlevel = 0x8000001E, 4003 .model_id = "AMD EPYC Processor", 4004 .cache_info = &epyc_cache_info, 4005 .versions = (X86CPUVersionDefinition[]) { 4006 { .version = 1 }, 4007 { 4008 .version = 2, 4009 .alias = "EPYC-IBPB", 4010 .props = (PropValue[]) { 4011 { "ibpb", "on" }, 4012 { "model-id", 4013 "AMD EPYC Processor (with IBPB)" }, 4014 { /* end of list */ } 4015 } 4016 }, 4017 { 4018 .version = 3, 4019 .props = (PropValue[]) { 4020 { "ibpb", "on" }, 4021 { "perfctr-core", "on" }, 4022 { "clzero", "on" }, 4023 { "xsaveerptr", "on" }, 4024 { "xsaves", "on" }, 4025 { "model-id", 4026 "AMD EPYC Processor" }, 4027 { /* end of list */ } 4028 } 4029 }, 4030 { /* end of list */ } 4031 } 4032 }, 4033 { 4034 .name = "Dhyana", 4035 .level = 0xd, 4036 .vendor = CPUID_VENDOR_HYGON, 4037 .family = 24, 4038 .model = 0, 4039 .stepping = 1, 4040 .features[FEAT_1_EDX] = 4041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4045 CPUID_VME | CPUID_FP87, 4046 .features[FEAT_1_ECX] = 4047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4052 .features[FEAT_8000_0001_EDX] = 4053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4055 CPUID_EXT2_SYSCALL, 4056 .features[FEAT_8000_0001_ECX] = 4057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4058 CPUID_EXT3_MISALIGNSSE | 
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4060 CPUID_EXT3_TOPOEXT, 4061 .features[FEAT_8000_0008_EBX] = 4062 CPUID_8000_0008_EBX_IBPB, 4063 .features[FEAT_7_0_EBX] = 4064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4067 /* 4068 * Missing: XSAVES (not supported by some Linux versions, 4069 * including v4.1 to v4.12). 4070 * KVM doesn't yet expose any XSAVES state save component. 4071 */ 4072 .features[FEAT_XSAVE] = 4073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4074 CPUID_XSAVE_XGETBV1, 4075 .features[FEAT_6_EAX] = 4076 CPUID_6_EAX_ARAT, 4077 .features[FEAT_SVM] = 4078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4079 .xlevel = 0x8000001E, 4080 .model_id = "Hygon Dhyana Processor", 4081 .cache_info = &epyc_cache_info, 4082 }, 4083 { 4084 .name = "EPYC-Rome", 4085 .level = 0xd, 4086 .vendor = CPUID_VENDOR_AMD, 4087 .family = 23, 4088 .model = 49, 4089 .stepping = 0, 4090 .features[FEAT_1_EDX] = 4091 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4092 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4093 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4094 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4095 CPUID_VME | CPUID_FP87, 4096 .features[FEAT_1_ECX] = 4097 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4098 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4099 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4100 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4101 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4102 .features[FEAT_8000_0001_EDX] = 4103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4104 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4105 CPUID_EXT2_SYSCALL, 4106 .features[FEAT_8000_0001_ECX] = 4107 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4108 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4109 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4110 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4111 .features[FEAT_8000_0008_EBX] = 4112 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4113 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4114 CPUID_8000_0008_EBX_STIBP, 4115 .features[FEAT_7_0_EBX] = 4116 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4117 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4118 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4119 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4120 .features[FEAT_7_0_ECX] = 4121 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4122 .features[FEAT_XSAVE] = 4123 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4124 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4125 .features[FEAT_6_EAX] = 4126 CPUID_6_EAX_ARAT, 4127 .features[FEAT_SVM] = 4128 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4129 .xlevel = 0x8000001E, 4130 .model_id = "AMD EPYC-Rome Processor", 4131 .cache_info = &epyc_rome_cache_info, 4132 }, 4133 }; 4134 4135 /* KVM-specific features that are automatically added/removed 4136 * from all CPU models when KVM is enabled. 
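 *
 * An entry in this table can be overridden with
 * x86_cpu_change_kvm_default() below; illustrative use only (the
 * property must already be present in the table, otherwise the
 * assert() in that helper fires):
 *
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");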
4137 */ 4138 static PropValue kvm_default_props[] = { 4139 { "kvmclock", "on" }, 4140 { "kvm-nopiodelay", "on" }, 4141 { "kvm-asyncpf", "on" }, 4142 { "kvm-steal-time", "on" }, 4143 { "kvm-pv-eoi", "on" }, 4144 { "kvmclock-stable-bit", "on" }, 4145 { "x2apic", "on" }, 4146 { "kvm-msi-ext-dest-id", "off" }, 4147 { "acpi", "off" }, 4148 { "monitor", "off" }, 4149 { "svm", "off" }, 4150 { NULL, NULL }, 4151 }; 4152 4153 /* TCG-specific defaults that override all CPU models when using TCG 4154 */ 4155 static PropValue tcg_default_props[] = { 4156 { "vme", "off" }, 4157 { NULL, NULL }, 4158 }; 4159 4160 4161 /* 4162 * We resolve CPU model aliases using -v1 when using "-machine 4163 * none", but this is just for compatibility while libvirt isn't 4164 * adapted to resolve CPU model versions before creating VMs. 4165 * See "Runnability guarantee of CPU models" at 4166 * docs/system/deprecated.rst. 4167 */ 4168 X86CPUVersion default_cpu_version = 1; 4169 4170 void x86_cpu_set_default_version(X86CPUVersion version) 4171 { 4172 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4173 assert(version != CPU_VERSION_AUTO); 4174 default_cpu_version = version; 4175 } 4176 4177 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4178 { 4179 int v = 0; 4180 const X86CPUVersionDefinition *vdef = 4181 x86_cpu_def_get_versions(model->cpudef); 4182 while (vdef->version) { 4183 v = vdef->version; 4184 vdef++; 4185 } 4186 return v; 4187 } 4188 4189 /* Return the actual version being used for a specific CPU model */ 4190 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4191 { 4192 X86CPUVersion v = model->version; 4193 if (v == CPU_VERSION_AUTO) { 4194 v = default_cpu_version; 4195 } 4196 if (v == CPU_VERSION_LATEST) { 4197 return x86_cpu_model_last_version(model); 4198 } 4199 return v; 4200 } 4201 4202 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4203 { 4204 PropValue *pv; 4205 for (pv = kvm_default_props; pv->prop; pv++) { 4206 if (!strcmp(pv->prop, prop)) { 4207 pv->value = value; 4208 break; 4209 } 4210 } 4211 4212 /* It is valid to call this function only for properties that 4213 * are already present in the kvm_default_props table. 4214 */ 4215 assert(pv->prop); 4216 } 4217 4218 static bool lmce_supported(void) 4219 { 4220 uint64_t mce_cap = 0; 4221 4222 #ifdef CONFIG_KVM 4223 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4224 return false; 4225 } 4226 #endif 4227 4228 return !!(mce_cap & MCG_LMCE_P); 4229 } 4230 4231 #define CPUID_MODEL_ID_SZ 48 4232 4233 /** 4234 * cpu_x86_fill_model_id: 4235 * Get CPUID model ID string from host CPU. 4236 * 4237 * @str should have at least CPUID_MODEL_ID_SZ bytes 4238 * 4239 * The function does NOT add a null terminator to the string 4240 * automatically. 
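 *
 * The 48 bytes come from CPUID leaves 0x80000002..0x80000004 (16 bytes
 * per leaf).  A typical caller, as in max_x86_cpu_initfn() below,
 * reserves one extra byte and zero-fills the buffer so the result is
 * usable as a C string:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);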
4241 */ 4242 static int cpu_x86_fill_model_id(char *str) 4243 { 4244 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4245 int i; 4246 4247 for (i = 0; i < 3; i++) { 4248 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4249 memcpy(str + i * 16 + 0, &eax, 4); 4250 memcpy(str + i * 16 + 4, &ebx, 4); 4251 memcpy(str + i * 16 + 8, &ecx, 4); 4252 memcpy(str + i * 16 + 12, &edx, 4); 4253 } 4254 return 0; 4255 } 4256 4257 static Property max_x86_cpu_properties[] = { 4258 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4259 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4260 DEFINE_PROP_END_OF_LIST() 4261 }; 4262 4263 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4264 { 4265 DeviceClass *dc = DEVICE_CLASS(oc); 4266 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4267 4268 xcc->ordering = 9; 4269 4270 xcc->model_description = 4271 "Enables all features supported by the accelerator in the current host"; 4272 4273 device_class_set_props(dc, max_x86_cpu_properties); 4274 } 4275 4276 static void max_x86_cpu_initfn(Object *obj) 4277 { 4278 X86CPU *cpu = X86_CPU(obj); 4279 CPUX86State *env = &cpu->env; 4280 KVMState *s = kvm_state; 4281 4282 /* We can't fill the features array here because we don't know yet if 4283 * "migratable" is true or false. 4284 */ 4285 cpu->max_features = true; 4286 4287 if (accel_uses_host_cpuid()) { 4288 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4289 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4290 int family, model, stepping; 4291 4292 host_vendor_fms(vendor, &family, &model, &stepping); 4293 cpu_x86_fill_model_id(model_id); 4294 4295 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 4296 object_property_set_int(OBJECT(cpu), "family", family, &error_abort); 4297 object_property_set_int(OBJECT(cpu), "model", model, &error_abort); 4298 object_property_set_int(OBJECT(cpu), "stepping", stepping, 4299 &error_abort); 4300 object_property_set_str(OBJECT(cpu), "model-id", model_id, 4301 &error_abort); 4302 4303 if (kvm_enabled()) { 4304 env->cpuid_min_level = 4305 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4306 env->cpuid_min_xlevel = 4307 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4308 env->cpuid_min_xlevel2 = 4309 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4310 } else { 4311 env->cpuid_min_level = 4312 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4313 env->cpuid_min_xlevel = 4314 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4315 env->cpuid_min_xlevel2 = 4316 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4317 } 4318 4319 if (lmce_supported()) { 4320 object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); 4321 } 4322 } else { 4323 object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, 4324 &error_abort); 4325 object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); 4326 object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); 4327 object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); 4328 object_property_set_str(OBJECT(cpu), "model-id", 4329 "QEMU TCG CPU version " QEMU_HW_VERSION, 4330 &error_abort); 4331 } 4332 4333 object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); 4334 } 4335 4336 static const TypeInfo max_x86_cpu_type_info = { 4337 .name = X86_CPU_TYPE_NAME("max"), 4338 .parent = TYPE_X86_CPU, 4339 .instance_init = max_x86_cpu_initfn, 4340 .class_init = max_x86_cpu_class_init, 4341 }; 4342 4343 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4344 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4345 { 
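    /*
     * "host" differs from its parent type "max" in that it is only built
     * when KVM or HVF support is compiled in (see the #if above) and it
     * requires host CPUID data at runtime (host_cpuid_required below);
     * "max" also works under TCG with a synthetic vendor/model-id, see
     * max_x86_cpu_initfn() above.
     */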
4346 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4347 4348 xcc->host_cpuid_required = true; 4349 xcc->ordering = 8; 4350 4351 #if defined(CONFIG_KVM) 4352 xcc->model_description = 4353 "KVM processor with all supported host features "; 4354 #elif defined(CONFIG_HVF) 4355 xcc->model_description = 4356 "HVF processor with all supported host features "; 4357 #endif 4358 } 4359 4360 static const TypeInfo host_x86_cpu_type_info = { 4361 .name = X86_CPU_TYPE_NAME("host"), 4362 .parent = X86_CPU_TYPE_NAME("max"), 4363 .class_init = host_x86_cpu_class_init, 4364 }; 4365 4366 #endif 4367 4368 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4369 { 4370 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4371 4372 switch (f->type) { 4373 case CPUID_FEATURE_WORD: 4374 { 4375 const char *reg = get_register_name_32(f->cpuid.reg); 4376 assert(reg); 4377 return g_strdup_printf("CPUID.%02XH:%s", 4378 f->cpuid.eax, reg); 4379 } 4380 case MSR_FEATURE_WORD: 4381 return g_strdup_printf("MSR(%02XH)", 4382 f->msr.index); 4383 } 4384 4385 return NULL; 4386 } 4387 4388 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4389 { 4390 FeatureWord w; 4391 4392 for (w = 0; w < FEATURE_WORDS; w++) { 4393 if (cpu->filtered_features[w]) { 4394 return true; 4395 } 4396 } 4397 4398 return false; 4399 } 4400 4401 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4402 const char *verbose_prefix) 4403 { 4404 CPUX86State *env = &cpu->env; 4405 FeatureWordInfo *f = &feature_word_info[w]; 4406 int i; 4407 4408 if (!cpu->force_features) { 4409 env->features[w] &= ~mask; 4410 } 4411 cpu->filtered_features[w] |= mask; 4412 4413 if (!verbose_prefix) { 4414 return; 4415 } 4416 4417 for (i = 0; i < 64; ++i) { 4418 if ((1ULL << i) & mask) { 4419 g_autofree char *feat_word_str = feature_word_description(f, i); 4420 warn_report("%s: %s%s%s [bit %d]", 4421 verbose_prefix, 4422 feat_word_str, 4423 f->feat_names[i] ? "." : "", 4424 f->feat_names[i] ? f->feat_names[i] : "", i); 4425 } 4426 } 4427 } 4428 4429 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4430 const char *name, void *opaque, 4431 Error **errp) 4432 { 4433 X86CPU *cpu = X86_CPU(obj); 4434 CPUX86State *env = &cpu->env; 4435 int64_t value; 4436 4437 value = (env->cpuid_version >> 8) & 0xf; 4438 if (value == 0xf) { 4439 value += (env->cpuid_version >> 20) & 0xff; 4440 } 4441 visit_type_int(v, name, &value, errp); 4442 } 4443 4444 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4445 const char *name, void *opaque, 4446 Error **errp) 4447 { 4448 X86CPU *cpu = X86_CPU(obj); 4449 CPUX86State *env = &cpu->env; 4450 const int64_t min = 0; 4451 const int64_t max = 0xff + 0xf; 4452 int64_t value; 4453 4454 if (!visit_type_int(v, name, &value, errp)) { 4455 return; 4456 } 4457 if (value < min || value > max) { 4458 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4459 name ? 
name : "null", value, min, max); 4460 return; 4461 } 4462 4463 env->cpuid_version &= ~0xff00f00; 4464 if (value > 0x0f) { 4465 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4466 } else { 4467 env->cpuid_version |= value << 8; 4468 } 4469 } 4470 4471 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4472 const char *name, void *opaque, 4473 Error **errp) 4474 { 4475 X86CPU *cpu = X86_CPU(obj); 4476 CPUX86State *env = &cpu->env; 4477 int64_t value; 4478 4479 value = (env->cpuid_version >> 4) & 0xf; 4480 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4481 visit_type_int(v, name, &value, errp); 4482 } 4483 4484 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4485 const char *name, void *opaque, 4486 Error **errp) 4487 { 4488 X86CPU *cpu = X86_CPU(obj); 4489 CPUX86State *env = &cpu->env; 4490 const int64_t min = 0; 4491 const int64_t max = 0xff; 4492 int64_t value; 4493 4494 if (!visit_type_int(v, name, &value, errp)) { 4495 return; 4496 } 4497 if (value < min || value > max) { 4498 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4499 name ? name : "null", value, min, max); 4500 return; 4501 } 4502 4503 env->cpuid_version &= ~0xf00f0; 4504 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4505 } 4506 4507 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4508 const char *name, void *opaque, 4509 Error **errp) 4510 { 4511 X86CPU *cpu = X86_CPU(obj); 4512 CPUX86State *env = &cpu->env; 4513 int64_t value; 4514 4515 value = env->cpuid_version & 0xf; 4516 visit_type_int(v, name, &value, errp); 4517 } 4518 4519 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4520 const char *name, void *opaque, 4521 Error **errp) 4522 { 4523 X86CPU *cpu = X86_CPU(obj); 4524 CPUX86State *env = &cpu->env; 4525 const int64_t min = 0; 4526 const int64_t max = 0xf; 4527 int64_t value; 4528 4529 if (!visit_type_int(v, name, &value, errp)) { 4530 return; 4531 } 4532 if (value < min || value > max) { 4533 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4534 name ? 
name : "null", value, min, max); 4535 return; 4536 } 4537 4538 env->cpuid_version &= ~0xf; 4539 env->cpuid_version |= value & 0xf; 4540 } 4541 4542 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4543 { 4544 X86CPU *cpu = X86_CPU(obj); 4545 CPUX86State *env = &cpu->env; 4546 char *value; 4547 4548 value = g_malloc(CPUID_VENDOR_SZ + 1); 4549 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4550 env->cpuid_vendor3); 4551 return value; 4552 } 4553 4554 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4555 Error **errp) 4556 { 4557 X86CPU *cpu = X86_CPU(obj); 4558 CPUX86State *env = &cpu->env; 4559 int i; 4560 4561 if (strlen(value) != CPUID_VENDOR_SZ) { 4562 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4563 return; 4564 } 4565 4566 env->cpuid_vendor1 = 0; 4567 env->cpuid_vendor2 = 0; 4568 env->cpuid_vendor3 = 0; 4569 for (i = 0; i < 4; i++) { 4570 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4571 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4572 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4573 } 4574 } 4575 4576 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4577 { 4578 X86CPU *cpu = X86_CPU(obj); 4579 CPUX86State *env = &cpu->env; 4580 char *value; 4581 int i; 4582 4583 value = g_malloc(48 + 1); 4584 for (i = 0; i < 48; i++) { 4585 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4586 } 4587 value[48] = '\0'; 4588 return value; 4589 } 4590 4591 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4592 Error **errp) 4593 { 4594 X86CPU *cpu = X86_CPU(obj); 4595 CPUX86State *env = &cpu->env; 4596 int c, len, i; 4597 4598 if (model_id == NULL) { 4599 model_id = ""; 4600 } 4601 len = strlen(model_id); 4602 memset(env->cpuid_model, 0, 48); 4603 for (i = 0; i < 48; i++) { 4604 if (i >= len) { 4605 c = '\0'; 4606 } else { 4607 c = (uint8_t)model_id[i]; 4608 } 4609 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4610 } 4611 } 4612 4613 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4614 void *opaque, Error **errp) 4615 { 4616 X86CPU *cpu = X86_CPU(obj); 4617 int64_t value; 4618 4619 value = cpu->env.tsc_khz * 1000; 4620 visit_type_int(v, name, &value, errp); 4621 } 4622 4623 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4624 void *opaque, Error **errp) 4625 { 4626 X86CPU *cpu = X86_CPU(obj); 4627 const int64_t min = 0; 4628 const int64_t max = INT64_MAX; 4629 int64_t value; 4630 4631 if (!visit_type_int(v, name, &value, errp)) { 4632 return; 4633 } 4634 if (value < min || value > max) { 4635 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4636 name ? name : "null", value, min, max); 4637 return; 4638 } 4639 4640 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4641 } 4642 4643 /* Generic getter for "feature-words" and "filtered-features" properties */ 4644 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4645 const char *name, void *opaque, 4646 Error **errp) 4647 { 4648 uint64_t *array = (uint64_t *)opaque; 4649 FeatureWord w; 4650 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4651 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4652 X86CPUFeatureWordInfoList *list = NULL; 4653 4654 for (w = 0; w < FEATURE_WORDS; w++) { 4655 FeatureWordInfo *wi = &feature_word_info[w]; 4656 /* 4657 * We didn't have MSR features when "feature-words" was 4658 * introduced. Therefore skipped other type entries. 
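         * As a consequence, the "feature-words" and "filtered-features"
         * properties only describe CPUID-backed feature words; words
         * backed by MSRs (for example FEAT_ARCH_CAPABILITIES or the
         * FEAT_VMX_* control words) are silently omitted here.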
 */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

/* Convert all '_' in a feature string option name to '-', to make the
 * feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    const char *name;
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 64);
    assert(w < FEATURE_WORDS);
    name = feature_word_info[w].feat_names[bitnr];
    assert(bitnr < 32 ||
           !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
    return name;
}

/* Compatibility hack to maintain the legacy +feat/-feat semantics, where
 * +feat/-feat overwrites any feature set by feat=on|feat even if the
 * latter is parsed after +feat/-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse a "+feature,-feature,feature=foo" CPU feature string */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. 
" 4776 "Don't mix both \"+%s\" and \"%s=%s\"", 4777 name, name, val); 4778 ambiguous = true; 4779 } 4780 if (g_list_find_custom(minus_features, name, compare_string)) { 4781 warn_report("Ambiguous CPU model string. " 4782 "Don't mix both \"-%s\" and \"%s=%s\"", 4783 name, name, val); 4784 ambiguous = true; 4785 } 4786 4787 /* Special case: */ 4788 if (!strcmp(name, "tsc-freq")) { 4789 int ret; 4790 uint64_t tsc_freq; 4791 4792 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4793 if (ret < 0 || tsc_freq > INT64_MAX) { 4794 error_setg(errp, "bad numerical value %s", val); 4795 return; 4796 } 4797 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4798 val = num; 4799 name = "tsc-frequency"; 4800 } 4801 4802 prop = g_new0(typeof(*prop), 1); 4803 prop->driver = typename; 4804 prop->property = g_strdup(name); 4805 prop->value = g_strdup(val); 4806 qdev_prop_register_global(prop); 4807 } 4808 4809 if (ambiguous) { 4810 warn_report("Compatibility of ambiguous CPU model " 4811 "strings won't be kept on future QEMU versions"); 4812 } 4813 } 4814 4815 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4816 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4817 4818 /* Build a list with the name of all features on a feature word array */ 4819 static void x86_cpu_list_feature_names(FeatureWordArray features, 4820 strList **feat_names) 4821 { 4822 FeatureWord w; 4823 strList **next = feat_names; 4824 4825 for (w = 0; w < FEATURE_WORDS; w++) { 4826 uint64_t filtered = features[w]; 4827 int i; 4828 for (i = 0; i < 64; i++) { 4829 if (filtered & (1ULL << i)) { 4830 strList *new = g_new0(strList, 1); 4831 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4832 *next = new; 4833 next = &new->next; 4834 } 4835 } 4836 } 4837 } 4838 4839 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4840 const char *name, void *opaque, 4841 Error **errp) 4842 { 4843 X86CPU *xc = X86_CPU(obj); 4844 strList *result = NULL; 4845 4846 x86_cpu_list_feature_names(xc->filtered_features, &result); 4847 visit_type_strList(v, "unavailable-features", &result, errp); 4848 } 4849 4850 /* Check for missing features that may prevent the CPU class from 4851 * running using the current machine and accelerator. 4852 */ 4853 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4854 strList **missing_feats) 4855 { 4856 X86CPU *xc; 4857 Error *err = NULL; 4858 strList **next = missing_feats; 4859 4860 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4861 strList *new = g_new0(strList, 1); 4862 new->value = g_strdup("kvm"); 4863 *missing_feats = new; 4864 return; 4865 } 4866 4867 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4868 4869 x86_cpu_expand_features(xc, &err); 4870 if (err) { 4871 /* Errors at x86_cpu_expand_features should never happen, 4872 * but in case it does, just report the model as not 4873 * runnable at all using the "type" property. 
4874 */ 4875 strList *new = g_new0(strList, 1); 4876 new->value = g_strdup("type"); 4877 *next = new; 4878 next = &new->next; 4879 error_free(err); 4880 } 4881 4882 x86_cpu_filter_features(xc, false); 4883 4884 x86_cpu_list_feature_names(xc->filtered_features, next); 4885 4886 object_unref(OBJECT(xc)); 4887 } 4888 4889 /* Print all cpuid feature names in featureset 4890 */ 4891 static void listflags(GList *features) 4892 { 4893 size_t len = 0; 4894 GList *tmp; 4895 4896 for (tmp = features; tmp; tmp = tmp->next) { 4897 const char *name = tmp->data; 4898 if ((len + strlen(name) + 1) >= 75) { 4899 qemu_printf("\n"); 4900 len = 0; 4901 } 4902 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4903 len += strlen(name) + 1; 4904 } 4905 qemu_printf("\n"); 4906 } 4907 4908 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4909 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4910 { 4911 ObjectClass *class_a = (ObjectClass *)a; 4912 ObjectClass *class_b = (ObjectClass *)b; 4913 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4914 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4915 int ret; 4916 4917 if (cc_a->ordering != cc_b->ordering) { 4918 ret = cc_a->ordering - cc_b->ordering; 4919 } else { 4920 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4921 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4922 ret = strcmp(name_a, name_b); 4923 } 4924 return ret; 4925 } 4926 4927 static GSList *get_sorted_cpu_model_list(void) 4928 { 4929 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4930 list = g_slist_sort(list, x86_cpu_list_compare); 4931 return list; 4932 } 4933 4934 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4935 { 4936 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4937 char *r = object_property_get_str(obj, "model-id", &error_abort); 4938 object_unref(obj); 4939 return r; 4940 } 4941 4942 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4943 { 4944 X86CPUVersion version; 4945 4946 if (!cc->model || !cc->model->is_alias) { 4947 return NULL; 4948 } 4949 version = x86_cpu_model_resolve_version(cc->model); 4950 if (version <= 0) { 4951 return NULL; 4952 } 4953 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4954 } 4955 4956 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4957 { 4958 ObjectClass *oc = data; 4959 X86CPUClass *cc = X86_CPU_CLASS(oc); 4960 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4961 g_autofree char *desc = g_strdup(cc->model_description); 4962 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4963 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4964 4965 if (!desc && alias_of) { 4966 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4967 desc = g_strdup("(alias configured by machine type)"); 4968 } else { 4969 desc = g_strdup_printf("(alias of %s)", alias_of); 4970 } 4971 } 4972 if (!desc && cc->model && cc->model->note) { 4973 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4974 } 4975 if (!desc) { 4976 desc = g_strdup_printf("%s", model_id); 4977 } 4978 4979 qemu_printf("x86 %-20s %-58s\n", name, desc); 4980 } 4981 4982 /* list available CPU models and flags */ 4983 void x86_cpu_list(void) 4984 { 4985 int i, j; 4986 GSList *list; 4987 GList *names = NULL; 4988 4989 qemu_printf("Available CPUs:\n"); 4990 list = get_sorted_cpu_model_list(); 4991 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4992 g_slist_free(list); 4993 4994 names = NULL; 4995 for (i = 0; i < 
ARRAY_SIZE(feature_word_info); i++) { 4996 FeatureWordInfo *fw = &feature_word_info[i]; 4997 for (j = 0; j < 64; j++) { 4998 if (fw->feat_names[j]) { 4999 names = g_list_append(names, (gpointer)fw->feat_names[j]); 5000 } 5001 } 5002 } 5003 5004 names = g_list_sort(names, (GCompareFunc)strcmp); 5005 5006 qemu_printf("\nRecognized CPUID flags:\n"); 5007 listflags(names); 5008 qemu_printf("\n"); 5009 g_list_free(names); 5010 } 5011 5012 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 5013 { 5014 ObjectClass *oc = data; 5015 X86CPUClass *cc = X86_CPU_CLASS(oc); 5016 CpuDefinitionInfoList **cpu_list = user_data; 5017 CpuDefinitionInfo *info; 5018 5019 info = g_malloc0(sizeof(*info)); 5020 info->name = x86_cpu_class_get_model_name(cc); 5021 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5022 info->has_unavailable_features = true; 5023 info->q_typename = g_strdup(object_class_get_name(oc)); 5024 info->migration_safe = cc->migration_safe; 5025 info->has_migration_safe = true; 5026 info->q_static = cc->static_model; 5027 if (cc->model && cc->model->cpudef->deprecation_note) { 5028 info->deprecated = true; 5029 } else { 5030 info->deprecated = false; 5031 } 5032 /* 5033 * Old machine types won't report aliases, so that alias translation 5034 * doesn't break compatibility with previous QEMU versions. 5035 */ 5036 if (default_cpu_version != CPU_VERSION_LEGACY) { 5037 info->alias_of = x86_cpu_class_get_alias_of(cc); 5038 info->has_alias_of = !!info->alias_of; 5039 } 5040 5041 QAPI_LIST_PREPEND(*cpu_list, info); 5042 } 5043 5044 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5045 { 5046 CpuDefinitionInfoList *cpu_list = NULL; 5047 GSList *list = get_sorted_cpu_model_list(); 5048 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5049 g_slist_free(list); 5050 return cpu_list; 5051 } 5052 5053 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5054 bool migratable_only) 5055 { 5056 FeatureWordInfo *wi = &feature_word_info[w]; 5057 uint64_t r = 0; 5058 5059 if (kvm_enabled()) { 5060 switch (wi->type) { 5061 case CPUID_FEATURE_WORD: 5062 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5063 wi->cpuid.ecx, 5064 wi->cpuid.reg); 5065 break; 5066 case MSR_FEATURE_WORD: 5067 r = kvm_arch_get_supported_msr_feature(kvm_state, 5068 wi->msr.index); 5069 break; 5070 } 5071 } else if (hvf_enabled()) { 5072 if (wi->type != CPUID_FEATURE_WORD) { 5073 return 0; 5074 } 5075 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5076 wi->cpuid.ecx, 5077 wi->cpuid.reg); 5078 } else if (tcg_enabled()) { 5079 r = wi->tcg_features; 5080 } else { 5081 return ~0; 5082 } 5083 if (migratable_only) { 5084 r &= x86_cpu_get_migratable_flags(w); 5085 } 5086 return r; 5087 } 5088 5089 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5090 { 5091 PropValue *pv; 5092 for (pv = props; pv->prop; pv++) { 5093 if (!pv->value) { 5094 continue; 5095 } 5096 object_property_parse(OBJECT(cpu), pv->prop, pv->value, 5097 &error_abort); 5098 } 5099 } 5100 5101 /* Apply properties for the CPU model version specified in model */ 5102 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5103 { 5104 const X86CPUVersionDefinition *vdef; 5105 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5106 5107 if (version == CPU_VERSION_LEGACY) { 5108 return; 5109 } 5110 5111 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5112 PropValue *p; 5113 5114 for (p = vdef->props; p && p->prop; p++) { 5115 
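            /*
             * Version property lists are cumulative: every version up to
             * and including the requested one is applied.
             */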
object_property_parse(OBJECT(cpu), p->prop, p->value, 5116 &error_abort); 5117 } 5118 5119 if (vdef->version == version) { 5120 break; 5121 } 5122 } 5123 5124 /* 5125 * If we reached the end of the list, version number was invalid 5126 */ 5127 assert(vdef->version == version); 5128 } 5129 5130 /* Load data from X86CPUDefinition into a X86CPU object 5131 */ 5132 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5133 { 5134 X86CPUDefinition *def = model->cpudef; 5135 CPUX86State *env = &cpu->env; 5136 const char *vendor; 5137 char host_vendor[CPUID_VENDOR_SZ + 1]; 5138 FeatureWord w; 5139 5140 /*NOTE: any property set by this function should be returned by 5141 * x86_cpu_static_props(), so static expansion of 5142 * query-cpu-model-expansion is always complete. 5143 */ 5144 5145 /* CPU models only set _minimum_ values for level/xlevel: */ 5146 object_property_set_uint(OBJECT(cpu), "min-level", def->level, 5147 &error_abort); 5148 object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel, 5149 &error_abort); 5150 5151 object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort); 5152 object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort); 5153 object_property_set_int(OBJECT(cpu), "stepping", def->stepping, 5154 &error_abort); 5155 object_property_set_str(OBJECT(cpu), "model-id", def->model_id, 5156 &error_abort); 5157 for (w = 0; w < FEATURE_WORDS; w++) { 5158 env->features[w] = def->features[w]; 5159 } 5160 5161 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5162 cpu->legacy_cache = !def->cache_info; 5163 5164 /* Special cases not set in the X86CPUDefinition structs: */ 5165 /* TODO: in-kernel irqchip for hvf */ 5166 if (kvm_enabled()) { 5167 if (!kvm_irqchip_in_kernel()) { 5168 x86_cpu_change_kvm_default("x2apic", "off"); 5169 } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { 5170 x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); 5171 } 5172 5173 x86_cpu_apply_props(cpu, kvm_default_props); 5174 } else if (tcg_enabled()) { 5175 x86_cpu_apply_props(cpu, tcg_default_props); 5176 } 5177 5178 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5179 5180 /* sysenter isn't supported in compatibility mode on AMD, 5181 * syscall isn't supported in compatibility mode on Intel. 5182 * Normally we advertise the actual CPU vendor, but you can 5183 * override this using the 'vendor' property if you want to use 5184 * KVM's sysenter/syscall emulation in compatibility mode and 5185 * when doing cross vendor migration 5186 */ 5187 vendor = def->vendor; 5188 if (accel_uses_host_cpuid()) { 5189 uint32_t ebx = 0, ecx = 0, edx = 0; 5190 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5191 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5192 vendor = host_vendor; 5193 } 5194 5195 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 5196 5197 x86_cpu_apply_version_props(cpu, model); 5198 5199 /* 5200 * Properties in versioned CPU model are not user specified features. 5201 * We can simply clear env->user_features here since it will be filled later 5202 * in x86_cpu_expand_features() based on plus_features and minus_features. 5203 */ 5204 memset(&env->user_features, 0, sizeof(env->user_features)); 5205 } 5206 5207 #ifndef CONFIG_USER_ONLY 5208 /* Return a QDict containing keys for all properties that can be included 5209 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5210 * must be included in the dictionary. 
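 * The dictionary carries only the key names (with null values); the
 * actual values for a given CPU are retrieved by x86_cpu_to_dict().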
5211 */ 5212 static QDict *x86_cpu_static_props(void) 5213 { 5214 FeatureWord w; 5215 int i; 5216 static const char *props[] = { 5217 "min-level", 5218 "min-xlevel", 5219 "family", 5220 "model", 5221 "stepping", 5222 "model-id", 5223 "vendor", 5224 "lmce", 5225 NULL, 5226 }; 5227 static QDict *d; 5228 5229 if (d) { 5230 return d; 5231 } 5232 5233 d = qdict_new(); 5234 for (i = 0; props[i]; i++) { 5235 qdict_put_null(d, props[i]); 5236 } 5237 5238 for (w = 0; w < FEATURE_WORDS; w++) { 5239 FeatureWordInfo *fi = &feature_word_info[w]; 5240 int bit; 5241 for (bit = 0; bit < 64; bit++) { 5242 if (!fi->feat_names[bit]) { 5243 continue; 5244 } 5245 qdict_put_null(d, fi->feat_names[bit]); 5246 } 5247 } 5248 5249 return d; 5250 } 5251 5252 /* Add an entry to @props dict, with the value for property. */ 5253 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5254 { 5255 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5256 &error_abort); 5257 5258 qdict_put_obj(props, prop, value); 5259 } 5260 5261 /* Convert CPU model data from X86CPU object to a property dictionary 5262 * that can recreate exactly the same CPU model. 5263 */ 5264 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5265 { 5266 QDict *sprops = x86_cpu_static_props(); 5267 const QDictEntry *e; 5268 5269 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5270 const char *prop = qdict_entry_key(e); 5271 x86_cpu_expand_prop(cpu, props, prop); 5272 } 5273 } 5274 5275 /* Convert CPU model data from X86CPU object to a property dictionary 5276 * that can recreate exactly the same CPU model, including every 5277 * writeable QOM property. 5278 */ 5279 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5280 { 5281 ObjectPropertyIterator iter; 5282 ObjectProperty *prop; 5283 5284 object_property_iter_init(&iter, OBJECT(cpu)); 5285 while ((prop = object_property_iter_next(&iter))) { 5286 /* skip read-only or write-only properties */ 5287 if (!prop->get || !prop->set) { 5288 continue; 5289 } 5290 5291 /* "hotplugged" is the only property that is configurable 5292 * on the command-line but will be set differently on CPUs 5293 * created using "-cpu ... -smp ..." and by CPUs created 5294 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5295 */ 5296 if (!strcmp(prop->name, "hotplugged")) { 5297 continue; 5298 } 5299 x86_cpu_expand_prop(cpu, props, prop->name); 5300 } 5301 } 5302 5303 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5304 { 5305 const QDictEntry *prop; 5306 5307 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5308 if (!object_property_set_qobject(obj, qdict_entry_key(prop), 5309 qdict_entry_value(prop), errp)) { 5310 break; 5311 } 5312 } 5313 } 5314 5315 /* Create X86CPU object according to model+props specification */ 5316 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5317 { 5318 X86CPU *xc = NULL; 5319 X86CPUClass *xcc; 5320 Error *err = NULL; 5321 5322 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5323 if (xcc == NULL) { 5324 error_setg(&err, "CPU model '%s' not found", model); 5325 goto out; 5326 } 5327 5328 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5329 if (props) { 5330 object_apply_props(OBJECT(xc), props, &err); 5331 if (err) { 5332 goto out; 5333 } 5334 } 5335 5336 x86_cpu_expand_features(xc, &err); 5337 if (err) { 5338 goto out; 5339 } 5340 5341 out: 5342 if (err) { 5343 error_propagate(errp, err); 5344 object_unref(OBJECT(xc)); 5345 xc = NULL; 5346 } 5347 return xc; 5348 } 5349 5350 CpuModelExpansionInfo * 5351 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5352 CpuModelInfo *model, 5353 Error **errp) 5354 { 5355 X86CPU *xc = NULL; 5356 Error *err = NULL; 5357 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5358 QDict *props = NULL; 5359 const char *base_name; 5360 5361 xc = x86_cpu_from_model(model->name, 5362 model->has_props ? 5363 qobject_to(QDict, model->props) : 5364 NULL, &err); 5365 if (err) { 5366 goto out; 5367 } 5368 5369 props = qdict_new(); 5370 ret->model = g_new0(CpuModelInfo, 1); 5371 ret->model->props = QOBJECT(props); 5372 ret->model->has_props = true; 5373 5374 switch (type) { 5375 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5376 /* Static expansion will be based on "base" only */ 5377 base_name = "base"; 5378 x86_cpu_to_dict(xc, props); 5379 break; 5380 case CPU_MODEL_EXPANSION_TYPE_FULL: 5381 /* As we don't return every single property, full expansion needs 5382 * to keep the original model name+props, and add extra 5383 * properties on top of that. 
5384 */ 5385 base_name = model->name; 5386 x86_cpu_to_dict_full(xc, props); 5387 break; 5388 default: 5389 error_setg(&err, "Unsupported expansion type"); 5390 goto out; 5391 } 5392 5393 x86_cpu_to_dict(xc, props); 5394 5395 ret->model->name = g_strdup(base_name); 5396 5397 out: 5398 object_unref(OBJECT(xc)); 5399 if (err) { 5400 error_propagate(errp, err); 5401 qapi_free_CpuModelExpansionInfo(ret); 5402 ret = NULL; 5403 } 5404 return ret; 5405 } 5406 #endif /* !CONFIG_USER_ONLY */ 5407 5408 static gchar *x86_gdb_arch_name(CPUState *cs) 5409 { 5410 #ifdef TARGET_X86_64 5411 return g_strdup("i386:x86-64"); 5412 #else 5413 return g_strdup("i386"); 5414 #endif 5415 } 5416 5417 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5418 { 5419 X86CPUModel *model = data; 5420 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5421 CPUClass *cc = CPU_CLASS(oc); 5422 5423 xcc->model = model; 5424 xcc->migration_safe = true; 5425 cc->deprecation_note = model->cpudef->deprecation_note; 5426 } 5427 5428 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5429 { 5430 g_autofree char *typename = x86_cpu_type_name(name); 5431 TypeInfo ti = { 5432 .name = typename, 5433 .parent = TYPE_X86_CPU, 5434 .class_init = x86_cpu_cpudef_class_init, 5435 .class_data = model, 5436 }; 5437 5438 type_register(&ti); 5439 } 5440 5441 static void x86_register_cpudef_types(X86CPUDefinition *def) 5442 { 5443 X86CPUModel *m; 5444 const X86CPUVersionDefinition *vdef; 5445 5446 /* AMD aliases are handled at runtime based on CPUID vendor, so 5447 * they shouldn't be set on the CPU model table. 5448 */ 5449 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5450 /* catch mistakes instead of silently truncating model_id when too long */ 5451 assert(def->model_id && strlen(def->model_id) <= 48); 5452 5453 /* Unversioned model: */ 5454 m = g_new0(X86CPUModel, 1); 5455 m->cpudef = def; 5456 m->version = CPU_VERSION_AUTO; 5457 m->is_alias = true; 5458 x86_register_cpu_model_type(def->name, m); 5459 5460 /* Versioned models: */ 5461 5462 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5463 X86CPUModel *m = g_new0(X86CPUModel, 1); 5464 g_autofree char *name = 5465 x86_cpu_versioned_model_name(def, vdef->version); 5466 m->cpudef = def; 5467 m->version = vdef->version; 5468 m->note = vdef->note; 5469 x86_register_cpu_model_type(name, m); 5470 5471 if (vdef->alias) { 5472 X86CPUModel *am = g_new0(X86CPUModel, 1); 5473 am->cpudef = def; 5474 am->version = vdef->version; 5475 am->is_alias = true; 5476 x86_register_cpu_model_type(vdef->alias, am); 5477 } 5478 } 5479 5480 } 5481 5482 #if !defined(CONFIG_USER_ONLY) 5483 5484 void cpu_clear_apic_feature(CPUX86State *env) 5485 { 5486 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5487 } 5488 5489 #endif /* !CONFIG_USER_ONLY */ 5490 5491 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5492 uint32_t *eax, uint32_t *ebx, 5493 uint32_t *ecx, uint32_t *edx) 5494 { 5495 X86CPU *cpu = env_archcpu(env); 5496 CPUState *cs = env_cpu(env); 5497 uint32_t die_offset; 5498 uint32_t limit; 5499 uint32_t signature[3]; 5500 X86CPUTopoInfo topo_info; 5501 5502 topo_info.dies_per_pkg = env->nr_dies; 5503 topo_info.cores_per_die = cs->nr_cores; 5504 topo_info.threads_per_core = cs->nr_threads; 5505 5506 /* Calculate & apply limits for different index ranges */ 5507 if (index >= 0xC0000000) { 5508 limit = env->cpuid_xlevel2; 5509 } else if (index >= 0x80000000) { 5510 limit = env->cpuid_xlevel; 5511 } else if (index >= 0x40000000) { 
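        /*
         * Hypervisor / paravirtualization leaf range; only leaves
         * 0x40000000..0x40000001 are implemented below.
         */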
5512 limit = 0x40000001; 5513 } else { 5514 limit = env->cpuid_level; 5515 } 5516 5517 if (index > limit) { 5518 /* Intel documentation states that invalid EAX input will 5519 * return the same information as EAX=cpuid_level 5520 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5521 */ 5522 index = env->cpuid_level; 5523 } 5524 5525 switch(index) { 5526 case 0: 5527 *eax = env->cpuid_level; 5528 *ebx = env->cpuid_vendor1; 5529 *edx = env->cpuid_vendor2; 5530 *ecx = env->cpuid_vendor3; 5531 break; 5532 case 1: 5533 *eax = env->cpuid_version; 5534 *ebx = (cpu->apic_id << 24) | 5535 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5536 *ecx = env->features[FEAT_1_ECX]; 5537 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5538 *ecx |= CPUID_EXT_OSXSAVE; 5539 } 5540 *edx = env->features[FEAT_1_EDX]; 5541 if (cs->nr_cores * cs->nr_threads > 1) { 5542 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5543 *edx |= CPUID_HT; 5544 } 5545 if (!cpu->enable_pmu) { 5546 *ecx &= ~CPUID_EXT_PDCM; 5547 } 5548 break; 5549 case 2: 5550 /* cache info: needed for Pentium Pro compatibility */ 5551 if (cpu->cache_info_passthrough) { 5552 host_cpuid(index, 0, eax, ebx, ecx, edx); 5553 break; 5554 } 5555 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5556 *ebx = 0; 5557 if (!cpu->enable_l3_cache) { 5558 *ecx = 0; 5559 } else { 5560 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5561 } 5562 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5563 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5564 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5565 break; 5566 case 4: 5567 /* cache info: needed for Core compatibility */ 5568 if (cpu->cache_info_passthrough) { 5569 host_cpuid(index, count, eax, ebx, ecx, edx); 5570 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
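On the host, EAX bits 31..26 of this leaf hold the host's number of
addressable core IDs per package minus one, which need not match the guest
topology; they are recomputed from cs->nr_cores below (e.g. nr_cores == 4
yields 3 in those bits).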
*/ 5571 *eax &= ~0xFC000000; 5572 if ((*eax & 31) && cs->nr_cores > 1) { 5573 *eax |= (cs->nr_cores - 1) << 26; 5574 } 5575 } else { 5576 *eax = 0; 5577 switch (count) { 5578 case 0: /* L1 dcache info */ 5579 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5580 1, cs->nr_cores, 5581 eax, ebx, ecx, edx); 5582 break; 5583 case 1: /* L1 icache info */ 5584 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5585 1, cs->nr_cores, 5586 eax, ebx, ecx, edx); 5587 break; 5588 case 2: /* L2 cache info */ 5589 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5590 cs->nr_threads, cs->nr_cores, 5591 eax, ebx, ecx, edx); 5592 break; 5593 case 3: /* L3 cache info */ 5594 die_offset = apicid_die_offset(&topo_info); 5595 if (cpu->enable_l3_cache) { 5596 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5597 (1 << die_offset), cs->nr_cores, 5598 eax, ebx, ecx, edx); 5599 break; 5600 } 5601 /* fall through */ 5602 default: /* end of info */ 5603 *eax = *ebx = *ecx = *edx = 0; 5604 break; 5605 } 5606 } 5607 break; 5608 case 5: 5609 /* MONITOR/MWAIT Leaf */ 5610 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5611 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5612 *ecx = cpu->mwait.ecx; /* flags */ 5613 *edx = cpu->mwait.edx; /* mwait substates */ 5614 break; 5615 case 6: 5616 /* Thermal and Power Leaf */ 5617 *eax = env->features[FEAT_6_EAX]; 5618 *ebx = 0; 5619 *ecx = 0; 5620 *edx = 0; 5621 break; 5622 case 7: 5623 /* Structured Extended Feature Flags Enumeration Leaf */ 5624 if (count == 0) { 5625 /* Maximum ECX value for sub-leaves */ 5626 *eax = env->cpuid_level_func7; 5627 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5628 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5629 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5630 *ecx |= CPUID_7_0_ECX_OSPKE; 5631 } 5632 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5633 } else if (count == 1) { 5634 *eax = env->features[FEAT_7_1_EAX]; 5635 *ebx = 0; 5636 *ecx = 0; 5637 *edx = 0; 5638 } else { 5639 *eax = 0; 5640 *ebx = 0; 5641 *ecx = 0; 5642 *edx = 0; 5643 } 5644 break; 5645 case 9: 5646 /* Direct Cache Access Information Leaf */ 5647 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5648 *ebx = 0; 5649 *ecx = 0; 5650 *edx = 0; 5651 break; 5652 case 0xA: 5653 /* Architectural Performance Monitoring Leaf */ 5654 if (kvm_enabled() && cpu->enable_pmu) { 5655 KVMState *s = cs->kvm_state; 5656 5657 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5658 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5659 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5660 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5661 } else if (hvf_enabled() && cpu->enable_pmu) { 5662 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5663 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5664 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5665 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5666 } else { 5667 *eax = 0; 5668 *ebx = 0; 5669 *ecx = 0; 5670 *edx = 0; 5671 } 5672 break; 5673 case 0xB: 5674 /* Extended Topology Enumeration Leaf */ 5675 if (!cpu->enable_cpuid_0xb) { 5676 *eax = *ebx = *ecx = *edx = 0; 5677 break; 5678 } 5679 5680 *ecx = count & 0xff; 5681 *edx = cpu->apic_id; 5682 5683 switch (count) { 5684 case 0: 5685 *eax = apicid_core_offset(&topo_info); 5686 *ebx = cs->nr_threads; 5687 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5688 break; 5689 case 1: 5690 *eax = apicid_pkg_offset(&topo_info); 5691 *ebx = cs->nr_cores * cs->nr_threads; 5692 *ecx 
   |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(&topo_info);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(&topo_info);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(&topo_info);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            /*
             * The initial values of xcr0 and ebx are both 0. On hosts whose
             * kernel lacks kvm commit 412a3c41 (e.g. CentOS 6), ebx stays 0
             * even after the guest updates xcr0, which crashes some legacy
             * guests (e.g. CentOS 6). Set ebx == ecx to work around it.
             */
            *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
            if (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP) {
                *ecx |= CPUID_14_0_ECX_LIP;
            }
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG nonetheless.
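         * (Under KVM, kvm_arch_init_vcpu() installs its own hypervisor
         * leaves, e.g. the KVM signature, so the values below are only
         * seen by TCG guests.)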
5796 */ 5797 if (tcg_enabled() && cpu->expose_tcg) { 5798 memcpy(signature, "TCGTCGTCGTCG", 12); 5799 *eax = 0x40000001; 5800 *ebx = signature[0]; 5801 *ecx = signature[1]; 5802 *edx = signature[2]; 5803 } else { 5804 *eax = 0; 5805 *ebx = 0; 5806 *ecx = 0; 5807 *edx = 0; 5808 } 5809 break; 5810 case 0x40000001: 5811 *eax = 0; 5812 *ebx = 0; 5813 *ecx = 0; 5814 *edx = 0; 5815 break; 5816 case 0x80000000: 5817 *eax = env->cpuid_xlevel; 5818 *ebx = env->cpuid_vendor1; 5819 *edx = env->cpuid_vendor2; 5820 *ecx = env->cpuid_vendor3; 5821 break; 5822 case 0x80000001: 5823 *eax = env->cpuid_version; 5824 *ebx = 0; 5825 *ecx = env->features[FEAT_8000_0001_ECX]; 5826 *edx = env->features[FEAT_8000_0001_EDX]; 5827 5828 /* The Linux kernel checks for the CMPLegacy bit and 5829 * discards multiple thread information if it is set. 5830 * So don't set it here for Intel to make Linux guests happy. 5831 */ 5832 if (cs->nr_cores * cs->nr_threads > 1) { 5833 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5834 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5835 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5836 *ecx |= 1 << 1; /* CmpLegacy bit */ 5837 } 5838 } 5839 break; 5840 case 0x80000002: 5841 case 0x80000003: 5842 case 0x80000004: 5843 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5844 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5845 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5846 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5847 break; 5848 case 0x80000005: 5849 /* cache info (L1 cache) */ 5850 if (cpu->cache_info_passthrough) { 5851 host_cpuid(index, 0, eax, ebx, ecx, edx); 5852 break; 5853 } 5854 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5855 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5856 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5857 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5858 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5859 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5860 break; 5861 case 0x80000006: 5862 /* cache info (L2 cache) */ 5863 if (cpu->cache_info_passthrough) { 5864 host_cpuid(index, 0, eax, ebx, ecx, edx); 5865 break; 5866 } 5867 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5868 (L2_DTLB_2M_ENTRIES << 16) | 5869 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5870 (L2_ITLB_2M_ENTRIES); 5871 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5872 (L2_DTLB_4K_ENTRIES << 16) | 5873 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5874 (L2_ITLB_4K_ENTRIES); 5875 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5876 cpu->enable_l3_cache ? 5877 env->cache_info_amd.l3_cache : NULL, 5878 ecx, edx); 5879 break; 5880 case 0x80000007: 5881 *eax = 0; 5882 *ebx = 0; 5883 *ecx = 0; 5884 *edx = env->features[FEAT_8000_0007_EDX]; 5885 break; 5886 case 0x80000008: 5887 /* virtual & phys address size in low 2 bytes. */ 5888 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5889 /* 64 bit processor */ 5890 *eax = cpu->phys_bits; /* configurable physical bits */ 5891 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5892 *eax |= 0x00003900; /* 57 bits virtual */ 5893 } else { 5894 *eax |= 0x00003000; /* 48 bits virtual */ 5895 } 5896 } else { 5897 *eax = cpu->phys_bits; 5898 } 5899 *ebx = env->features[FEAT_8000_0008_EBX]; 5900 if (cs->nr_cores * cs->nr_threads > 1) { 5901 /* 5902 * Bits 15:12 is "The number of bits in the initial 5903 * Core::X86::Apic::ApicId[ApicId] value that indicate 5904 * thread ID within a package". 
5905 * Bits 7:0 is "The number of threads in the package is NC+1" 5906 */ 5907 *ecx = (apicid_pkg_offset(&topo_info) << 12) | 5908 ((cs->nr_cores * cs->nr_threads) - 1); 5909 } else { 5910 *ecx = 0; 5911 } 5912 *edx = 0; 5913 break; 5914 case 0x8000000A: 5915 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5916 *eax = 0x00000001; /* SVM Revision */ 5917 *ebx = 0x00000010; /* nr of ASIDs */ 5918 *ecx = 0; 5919 *edx = env->features[FEAT_SVM]; /* optional features */ 5920 } else { 5921 *eax = 0; 5922 *ebx = 0; 5923 *ecx = 0; 5924 *edx = 0; 5925 } 5926 break; 5927 case 0x8000001D: 5928 *eax = 0; 5929 if (cpu->cache_info_passthrough) { 5930 host_cpuid(index, count, eax, ebx, ecx, edx); 5931 break; 5932 } 5933 switch (count) { 5934 case 0: /* L1 dcache info */ 5935 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5936 &topo_info, eax, ebx, ecx, edx); 5937 break; 5938 case 1: /* L1 icache info */ 5939 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5940 &topo_info, eax, ebx, ecx, edx); 5941 break; 5942 case 2: /* L2 cache info */ 5943 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5944 &topo_info, eax, ebx, ecx, edx); 5945 break; 5946 case 3: /* L3 cache info */ 5947 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5948 &topo_info, eax, ebx, ecx, edx); 5949 break; 5950 default: /* end of info */ 5951 *eax = *ebx = *ecx = *edx = 0; 5952 break; 5953 } 5954 break; 5955 case 0x8000001E: 5956 if (cpu->core_id <= 255) { 5957 encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx); 5958 } else { 5959 *eax = 0; 5960 *ebx = 0; 5961 *ecx = 0; 5962 *edx = 0; 5963 } 5964 break; 5965 case 0xC0000000: 5966 *eax = env->cpuid_xlevel2; 5967 *ebx = 0; 5968 *ecx = 0; 5969 *edx = 0; 5970 break; 5971 case 0xC0000001: 5972 /* Support for VIA CPU's CPUID instruction */ 5973 *eax = env->cpuid_version; 5974 *ebx = 0; 5975 *ecx = 0; 5976 *edx = env->features[FEAT_C000_0001_EDX]; 5977 break; 5978 case 0xC0000002: 5979 case 0xC0000003: 5980 case 0xC0000004: 5981 /* Reserved for the future, and now filled with zero */ 5982 *eax = 0; 5983 *ebx = 0; 5984 *ecx = 0; 5985 *edx = 0; 5986 break; 5987 case 0x8000001F: 5988 *eax = sev_enabled() ? 
0x2 : 0; 5989 *ebx = sev_get_cbit_position(); 5990 *ebx |= sev_get_reduced_phys_bits() << 6; 5991 *ecx = 0; 5992 *edx = 0; 5993 break; 5994 default: 5995 /* reserved values: zero */ 5996 *eax = 0; 5997 *ebx = 0; 5998 *ecx = 0; 5999 *edx = 0; 6000 break; 6001 } 6002 } 6003 6004 static void x86_cpu_reset(DeviceState *dev) 6005 { 6006 CPUState *s = CPU(dev); 6007 X86CPU *cpu = X86_CPU(s); 6008 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 6009 CPUX86State *env = &cpu->env; 6010 target_ulong cr4; 6011 uint64_t xcr0; 6012 int i; 6013 6014 xcc->parent_reset(dev); 6015 6016 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 6017 6018 env->old_exception = -1; 6019 6020 /* init to reset state */ 6021 6022 env->hflags2 |= HF2_GIF_MASK; 6023 env->hflags &= ~HF_GUEST_MASK; 6024 6025 cpu_x86_update_cr0(env, 0x60000010); 6026 env->a20_mask = ~0x0; 6027 env->smbase = 0x30000; 6028 env->msr_smi_count = 0; 6029 6030 env->idt.limit = 0xffff; 6031 env->gdt.limit = 0xffff; 6032 env->ldt.limit = 0xffff; 6033 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6034 env->tr.limit = 0xffff; 6035 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6036 6037 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6038 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6039 DESC_R_MASK | DESC_A_MASK); 6040 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6041 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6042 DESC_A_MASK); 6043 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6044 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6045 DESC_A_MASK); 6046 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6047 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6048 DESC_A_MASK); 6049 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6050 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6051 DESC_A_MASK); 6052 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6053 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6054 DESC_A_MASK); 6055 6056 env->eip = 0xfff0; 6057 env->regs[R_EDX] = env->cpuid_version; 6058 6059 env->eflags = 0x2; 6060 6061 /* FPU init */ 6062 for (i = 0; i < 8; i++) { 6063 env->fptags[i] = 1; 6064 } 6065 cpu_set_fpuc(env, 0x37f); 6066 6067 env->mxcsr = 0x1f80; 6068 /* All units are in INIT state. */ 6069 env->xstate_bv = 0; 6070 6071 env->pat = 0x0007040600070406ULL; 6072 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6073 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6074 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6075 } 6076 6077 memset(env->dr, 0, sizeof(env->dr)); 6078 env->dr[6] = DR6_FIXED_1; 6079 env->dr[7] = DR7_FIXED_1; 6080 cpu_breakpoint_remove_all(s, BP_CPU); 6081 cpu_watchpoint_remove_all(s, BP_CPU); 6082 6083 cr4 = 0; 6084 xcr0 = XSTATE_FP_MASK; 6085 6086 #ifdef CONFIG_USER_ONLY 6087 /* Enable all the features for user-mode. */ 6088 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6089 xcr0 |= XSTATE_SSE_MASK; 6090 } 6091 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6092 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6093 if (env->features[esa->feature] & esa->bits) { 6094 xcr0 |= 1ull << i; 6095 } 6096 } 6097 6098 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6099 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6100 } 6101 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6102 cr4 |= CR4_FSGSBASE_MASK; 6103 } 6104 #endif 6105 6106 env->xcr0 = xcr0; 6107 cpu_x86_update_cr4(env, cr4); 6108 6109 /* 6110 * SDM 11.11.5 requires: 6111 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6112 * - IA32_MTRR_PHYSMASKn.V = 0 6113 * All other bits are undefined. For simplification, zero it all. 
6114 */ 6115 env->mtrr_deftype = 0; 6116 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6117 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6118 6119 env->interrupt_injected = -1; 6120 env->exception_nr = -1; 6121 env->exception_pending = 0; 6122 env->exception_injected = 0; 6123 env->exception_has_payload = false; 6124 env->exception_payload = 0; 6125 env->nmi_injected = false; 6126 #if !defined(CONFIG_USER_ONLY) 6127 /* We hard-wire the BSP to the first CPU. */ 6128 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6129 6130 s->halted = !cpu_is_bsp(cpu); 6131 6132 if (kvm_enabled()) { 6133 kvm_arch_reset_vcpu(cpu); 6134 } 6135 #endif 6136 } 6137 6138 #ifndef CONFIG_USER_ONLY 6139 bool cpu_is_bsp(X86CPU *cpu) 6140 { 6141 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6142 } 6143 6144 /* TODO: remove me, when reset over QOM tree is implemented */ 6145 static void x86_cpu_machine_reset_cb(void *opaque) 6146 { 6147 X86CPU *cpu = opaque; 6148 cpu_reset(CPU(cpu)); 6149 } 6150 #endif 6151 6152 static void mce_init(X86CPU *cpu) 6153 { 6154 CPUX86State *cenv = &cpu->env; 6155 unsigned int bank; 6156 6157 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6158 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6159 (CPUID_MCE | CPUID_MCA)) { 6160 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6161 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6162 cenv->mcg_ctl = ~(uint64_t)0; 6163 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6164 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6165 } 6166 } 6167 } 6168 6169 #ifndef CONFIG_USER_ONLY 6170 APICCommonClass *apic_get_class(void) 6171 { 6172 const char *apic_type = "apic"; 6173 6174 /* TODO: in-kernel irqchip for hvf */ 6175 if (kvm_apic_in_kernel()) { 6176 apic_type = "kvm-apic"; 6177 } else if (xen_enabled()) { 6178 apic_type = "xen-apic"; 6179 } else if (whpx_apic_in_platform()) { 6180 apic_type = "whpx-apic"; 6181 } 6182 6183 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6184 } 6185 6186 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6187 { 6188 APICCommonState *apic; 6189 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6190 6191 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6192 6193 object_property_add_child(OBJECT(cpu), "lapic", 6194 OBJECT(cpu->apic_state)); 6195 object_unref(OBJECT(cpu->apic_state)); 6196 6197 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6198 /* TODO: convert to link<> */ 6199 apic = APIC_COMMON(cpu->apic_state); 6200 apic->cpu = cpu; 6201 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6202 } 6203 6204 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6205 { 6206 APICCommonState *apic; 6207 static bool apic_mmio_map_once; 6208 6209 if (cpu->apic_state == NULL) { 6210 return; 6211 } 6212 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6213 6214 /* Map APIC MMIO area */ 6215 apic = APIC_COMMON(cpu->apic_state); 6216 if (!apic_mmio_map_once) { 6217 memory_region_add_subregion_overlap(get_system_memory(), 6218 apic->apicbase & 6219 MSR_IA32_APICBASE_BASE, 6220 &apic->io_memory, 6221 0x1000); 6222 apic_mmio_map_once = true; 6223 } 6224 } 6225 6226 static void x86_cpu_machine_done(Notifier *n, void *unused) 6227 { 6228 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6229 MemoryRegion *smram = 6230 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6231 6232 if (smram) { 6233 cpu->smram = g_new(MemoryRegion, 1); 6234 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6235 smram, 0, 4 * GiB); 
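        /*
         * The alias covers the low 4 GiB of the machine's SMRAM region; it
         * is enabled and mapped into the CPU's SMM address space with
         * higher priority below.
         */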
6236 memory_region_set_enabled(cpu->smram, true); 6237 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6238 } 6239 } 6240 #else 6241 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6242 { 6243 } 6244 #endif 6245 6246 /* Note: Only safe for use on x86(-64) hosts */ 6247 static uint32_t x86_host_phys_bits(void) 6248 { 6249 uint32_t eax; 6250 uint32_t host_phys_bits; 6251 6252 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6253 if (eax >= 0x80000008) { 6254 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6255 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6256 * at 23:16 that can specify a maximum physical address bits for 6257 * the guest that can override this value; but I've not seen 6258 * anything with that set. 6259 */ 6260 host_phys_bits = eax & 0xff; 6261 } else { 6262 /* It's an odd 64 bit machine that doesn't have the leaf for 6263 * physical address bits; fall back to 36 that's most older 6264 * Intel. 6265 */ 6266 host_phys_bits = 36; 6267 } 6268 6269 return host_phys_bits; 6270 } 6271 6272 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6273 { 6274 if (*min < value) { 6275 *min = value; 6276 } 6277 } 6278 6279 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6280 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6281 { 6282 CPUX86State *env = &cpu->env; 6283 FeatureWordInfo *fi = &feature_word_info[w]; 6284 uint32_t eax = fi->cpuid.eax; 6285 uint32_t region = eax & 0xF0000000; 6286 6287 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6288 if (!env->features[w]) { 6289 return; 6290 } 6291 6292 switch (region) { 6293 case 0x00000000: 6294 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6295 break; 6296 case 0x80000000: 6297 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6298 break; 6299 case 0xC0000000: 6300 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6301 break; 6302 } 6303 6304 if (eax == 7) { 6305 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6306 fi->cpuid.ecx); 6307 } 6308 } 6309 6310 /* Calculate XSAVE components based on the configured CPU feature flags */ 6311 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6312 { 6313 CPUX86State *env = &cpu->env; 6314 int i; 6315 uint64_t mask; 6316 6317 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6318 env->features[FEAT_XSAVE_COMP_LO] = 0; 6319 env->features[FEAT_XSAVE_COMP_HI] = 0; 6320 return; 6321 } 6322 6323 mask = 0; 6324 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6325 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6326 if (env->features[esa->feature] & esa->bits) { 6327 mask |= (1ULL << i); 6328 } 6329 } 6330 6331 env->features[FEAT_XSAVE_COMP_LO] = mask; 6332 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6333 } 6334 6335 /***** Steps involved on loading and filtering CPUID data 6336 * 6337 * When initializing and realizing a CPU object, the steps 6338 * involved in setting up CPUID data are: 6339 * 6340 * 1) Loading CPU model definition (X86CPUDefinition). This is 6341 * implemented by x86_cpu_load_model() and should be completely 6342 * transparent, as it is done automatically by instance_init. 6343 * No code should need to look at X86CPUDefinition structs 6344 * outside instance_init. 6345 * 6346 * 2) CPU expansion. This is done by realize before CPUID 6347 * filtering, and will make sure host/accelerator data is 6348 * loaded for CPU models that depend on host capabilities 6349 * (e.g. "host"). 
Done by x86_cpu_expand_features(). 6350 * 6351 * 3) CPUID filtering. This initializes extra data related to 6352 * CPUID, and checks if the host supports all capabilities 6353 * required by the CPU. Runnability of a CPU model is 6354 * determined at this step. Done by x86_cpu_filter_features(). 6355 * 6356 * Some operations don't require all steps to be performed. 6357 * More precisely: 6358 * 6359 * - CPU instance creation (instance_init) will run only CPU 6360 * model loading. CPU expansion can't run at instance_init-time 6361 * because host/accelerator data may be not available yet. 6362 * - CPU realization will perform both CPU model expansion and CPUID 6363 * filtering, and return an error in case one of them fails. 6364 * - query-cpu-definitions needs to run all 3 steps. It needs 6365 * to run CPUID filtering, as the 'unavailable-features' 6366 * field is set based on the filtering results. 6367 * - The query-cpu-model-expansion QMP command only needs to run 6368 * CPU model loading and CPU expansion. It should not filter 6369 * any CPUID data based on host capabilities. 6370 */ 6371 6372 /* Expand CPU configuration data, based on configured features 6373 * and host/accelerator capabilities when appropriate. 6374 */ 6375 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6376 { 6377 CPUX86State *env = &cpu->env; 6378 FeatureWord w; 6379 int i; 6380 GList *l; 6381 6382 for (l = plus_features; l; l = l->next) { 6383 const char *prop = l->data; 6384 if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) { 6385 return; 6386 } 6387 } 6388 6389 for (l = minus_features; l; l = l->next) { 6390 const char *prop = l->data; 6391 if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) { 6392 return; 6393 } 6394 } 6395 6396 /*TODO: Now cpu->max_features doesn't overwrite features 6397 * set using QOM properties, and we can convert 6398 * plus_features & minus_features to global properties 6399 * inside x86_cpu_parse_featurestr() too. 6400 */ 6401 if (cpu->max_features) { 6402 for (w = 0; w < FEATURE_WORDS; w++) { 6403 /* Override only features that weren't set explicitly 6404 * by the user. 6405 */ 6406 env->features[w] |= 6407 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6408 ~env->user_features[w] & 6409 ~feature_word_info[w].no_autoenable_flags; 6410 } 6411 } 6412 6413 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6414 FeatureDep *d = &feature_dependencies[i]; 6415 if (!(env->features[d->from.index] & d->from.mask)) { 6416 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6417 6418 /* Not an error unless the dependent feature was added explicitly. 
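Features merely inherited from the CPU model are dropped silently; only
bits the user enabled explicitly trigger the warning below.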
 */
            mark_unavailable_features(cpu, d->to.index,
                unavailable_features & env->user_features[d->to.index],
                "This feature depends on other features that were not requested");

            env->features[d->to.index] &= ~unavailable_features;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
            if (cpu->intel_pt_auto_level) {
                x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
            } else if (cpu->env.cpuid_min_level < 0x14) {
                mark_unavailable_features(cpu, FEAT_7_0_EBX,
                    CPUID_7_0_EBX_INTEL_PT,
                    "Intel PT needs CPUID leaf 0x14; enable it with "
                    "\"-cpu ...,+intel-pt,min-level=0x14\"");
            }
        }

        /* CPU topology with multi-die support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level_func7 == UINT32_MAX) {
        env->cpuid_level_func7 = env->cpuid_min_level_func7;
    }
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }
}

/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Unavailable features are dropped from the CPU's feature set; when
 * @verbose is set, the removed flags are reported.
 */
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    const char *prefix = NULL;

    if (verbose) {
        prefix = accel_uses_host_cpuid()
                 ?
"host doesn't support requested feature" 6506 : "TCG doesn't support requested feature"; 6507 } 6508 6509 for (w = 0; w < FEATURE_WORDS; w++) { 6510 uint64_t host_feat = 6511 x86_cpu_get_supported_feature_word(w, false); 6512 uint64_t requested_features = env->features[w]; 6513 uint64_t unavailable_features = requested_features & ~host_feat; 6514 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6515 } 6516 6517 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6518 kvm_enabled()) { 6519 KVMState *s = CPU(cpu)->kvm_state; 6520 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6521 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6522 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6523 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6524 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6525 6526 if (!eax_0 || 6527 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6528 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6529 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6530 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6531 INTEL_PT_ADDR_RANGES_NUM) || 6532 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6533 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6534 ((ecx_0 & CPUID_14_0_ECX_LIP) != 6535 (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP))) { 6536 /* 6537 * Processor Trace capabilities aren't configurable, so if the 6538 * host can't emulate the capabilities we report on 6539 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6540 */ 6541 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6542 } 6543 } 6544 } 6545 6546 static void x86_cpu_hyperv_realize(X86CPU *cpu) 6547 { 6548 size_t len; 6549 6550 /* Hyper-V vendor id */ 6551 if (!cpu->hyperv_vendor) { 6552 memcpy(cpu->hyperv_vendor_id, "Microsoft Hv", 12); 6553 } else { 6554 len = strlen(cpu->hyperv_vendor); 6555 6556 if (len > 12) { 6557 warn_report("hv-vendor-id truncated to 12 characters"); 6558 len = 12; 6559 } 6560 memset(cpu->hyperv_vendor_id, 0, 12); 6561 memcpy(cpu->hyperv_vendor_id, cpu->hyperv_vendor, len); 6562 } 6563 6564 /* 'Hv#1' interface identification*/ 6565 cpu->hyperv_interface_id[0] = 0x31237648; 6566 cpu->hyperv_interface_id[1] = 0; 6567 cpu->hyperv_interface_id[2] = 0; 6568 cpu->hyperv_interface_id[3] = 0; 6569 6570 /* Hypervisor system identity */ 6571 cpu->hyperv_version_id[0] = 0x00001bbc; 6572 cpu->hyperv_version_id[1] = 0x00060001; 6573 6574 /* Hypervisor implementation limits */ 6575 cpu->hyperv_limits[0] = 64; 6576 cpu->hyperv_limits[1] = 0; 6577 cpu->hyperv_limits[2] = 0; 6578 } 6579 6580 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6581 { 6582 CPUState *cs = CPU(dev); 6583 X86CPU *cpu = X86_CPU(dev); 6584 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6585 CPUX86State *env = &cpu->env; 6586 Error *local_err = NULL; 6587 static bool ht_warned; 6588 6589 if (xcc->host_cpuid_required) { 6590 if (!accel_uses_host_cpuid()) { 6591 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6592 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6593 goto out; 6594 } 6595 } 6596 6597 if (cpu->max_features && accel_uses_host_cpuid()) { 6598 if (enable_cpu_pm) { 6599 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6600 &cpu->mwait.ecx, &cpu->mwait.edx); 6601 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6602 if (kvm_enabled() && kvm_has_waitpkg()) { 6603 env->features[FEAT_7_0_ECX] |= 
                    CPUID_7_0_ECX_WAITPKG;
            }
        }
        if (kvm_enabled() && cpu->ucode_rev == 0) {
            cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
                                                        MSR_IA32_UCODE_REV);
        }
    }

    if (cpu->ucode_rev == 0) {
        /* The default is the same as KVM's. */
        if (IS_AMD_CPU(env)) {
            cpu->ucode_rev = 0x01000065;
        } else {
            cpu->ucode_rev = 0x100000000ULL;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);

    if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
        error_setg(&local_err,
                   accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64 bit systems, think about the number of physical bits to present.
     * Ideally this should be the same as the host; anything other than
     * matching the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u"
                                 " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user-set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    /* Process Hyper-V enlightenments */
    x86_cpu_hyperv_realize(cpu);

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
                                            cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading.  Even though
     * QEMU adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the
     * (sockets, cores, threads) inputs, it is still better to give users
     * a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu().  Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev);
}

/* State behind one named CPU feature-flag property: which feature word it
 * lives in and which bit(s) of that word it controls.
 */
typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

/* Register a boolean property to get/set a single bit in a feature word
 * (uint64_t).
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPUClass *xcc,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    ObjectClass *oc = OBJECT_CLASS(xcc);
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_class_property_find(oc, prop_name);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_class_property_add(oc, prop_name, "bool",
                                  x86_cpu_get_bit_prop,
                                  x86_cpu_set_bit_prop,
                                  NULL, fp);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(xcc, name, w, bitnr);
}

#if !defined(CONFIG_USER_ONLY)
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
#endif /* !CONFIG_USER_ONLY */

static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;

    env->nr_dies = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features);

    object_property_add_alias(obj, "sse3", obj, "pni");
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq");
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1");
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2");
    object_property_add_alias(obj, "xd", obj, "nx");
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt");
    object_property_add_alias(obj, "i64", obj, "lm");

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl");
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust");
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt");
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm");
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy");
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr");
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core");
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb");
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
    object_property_add_alias(obj, "kvm_asyncpf",
obj, "kvm-asyncpf"); 7031 object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int"); 7032 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 7033 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 7034 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 7035 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 7036 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 7037 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 7038 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 7039 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 7040 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 7041 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 7042 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 7043 7044 if (xcc->model) { 7045 x86_cpu_load_model(cpu, xcc->model); 7046 } 7047 } 7048 7049 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7050 { 7051 X86CPU *cpu = X86_CPU(cs); 7052 7053 return cpu->apic_id; 7054 } 7055 7056 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 7057 { 7058 X86CPU *cpu = X86_CPU(cs); 7059 7060 return cpu->env.cr[0] & CR0_PG_MASK; 7061 } 7062 7063 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 7064 { 7065 X86CPU *cpu = X86_CPU(cs); 7066 7067 cpu->env.eip = value; 7068 } 7069 7070 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 7071 { 7072 X86CPU *cpu = X86_CPU(cs); 7073 CPUX86State *env = &cpu->env; 7074 7075 #if !defined(CONFIG_USER_ONLY) 7076 if (interrupt_request & CPU_INTERRUPT_POLL) { 7077 return CPU_INTERRUPT_POLL; 7078 } 7079 #endif 7080 if (interrupt_request & CPU_INTERRUPT_SIPI) { 7081 return CPU_INTERRUPT_SIPI; 7082 } 7083 7084 if (env->hflags2 & HF2_GIF_MASK) { 7085 if ((interrupt_request & CPU_INTERRUPT_SMI) && 7086 !(env->hflags & HF_SMM_MASK)) { 7087 return CPU_INTERRUPT_SMI; 7088 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 7089 !(env->hflags2 & HF2_NMI_MASK)) { 7090 return CPU_INTERRUPT_NMI; 7091 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 7092 return CPU_INTERRUPT_MCE; 7093 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 7094 (((env->hflags2 & HF2_VINTR_MASK) && 7095 (env->hflags2 & HF2_HIF_MASK)) || 7096 (!(env->hflags2 & HF2_VINTR_MASK) && 7097 (env->eflags & IF_MASK && 7098 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 7099 return CPU_INTERRUPT_HARD; 7100 #if !defined(CONFIG_USER_ONLY) 7101 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 7102 (env->eflags & IF_MASK) && 7103 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 7104 return CPU_INTERRUPT_VIRQ; 7105 #endif 7106 } 7107 } 7108 7109 return 0; 7110 } 7111 7112 static bool x86_cpu_has_work(CPUState *cs) 7113 { 7114 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 7115 } 7116 7117 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 7118 { 7119 X86CPU *cpu = X86_CPU(cs); 7120 CPUX86State *env = &cpu->env; 7121 7122 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 7123 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 7124 : bfd_mach_i386_i8086); 7125 info->print_insn = print_insn_i386; 7126 7127 info->cap_arch = CS_ARCH_X86; 7128 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 7129 : env->hflags & HF_CS32_MASK ? 
                                                     CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_NOTIFY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7218 HYPERV_FEAT_REENLIGHTENMENT, 0), 7219 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7220 HYPERV_FEAT_TLBFLUSH, 0), 7221 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7222 HYPERV_FEAT_EVMCS, 0), 7223 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7224 HYPERV_FEAT_IPI, 0), 7225 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7226 HYPERV_FEAT_STIMER_DIRECT, 0), 7227 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7228 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7229 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7230 7231 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7232 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7233 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7234 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7235 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7236 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7237 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7238 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7239 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7240 UINT32_MAX), 7241 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7242 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7243 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7244 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7245 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7246 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7247 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7248 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7249 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor), 7250 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7251 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7252 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7253 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7254 false), 7255 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7256 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7257 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7258 true), 7259 /* 7260 * lecacy_cache defaults to true unless the CPU model provides its 7261 * own cache information (see x86_cpu_load_def()). 7262 */ 7263 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7264 7265 /* 7266 * From "Requirements for Implementing the Microsoft 7267 * Hypervisor Interface": 7268 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7269 * 7270 * "Starting with Windows Server 2012 and Windows 8, if 7271 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7272 * the hypervisor imposes no specific limit to the number of VPs. 7273 * In this case, Windows Server 2012 guest VMs may use more than 7274 * 64 VPs, up to the maximum supported number of processors applicable 7275 * to the specific Windows version being used." 
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);
    FeatureWord w;

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    device_class_set_props(dc, x86_cpu_properties);

    device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;

#ifdef CONFIG_TCG
    tcg_cpu_common_class_init(cc);
#endif /* CONFIG_TCG */

    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;

#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif /* !CONFIG_USER_ONLY */

    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;

    object_class_property_add(oc, "family", "int",
                              x86_cpuid_version_get_family,
                              x86_cpuid_version_set_family, NULL, NULL);
    object_class_property_add(oc, "model", "int",
                              x86_cpuid_version_get_model,
                              x86_cpuid_version_set_model, NULL, NULL);
    object_class_property_add(oc, "stepping", "int",
                              x86_cpuid_version_get_stepping,
                              x86_cpuid_version_set_stepping, NULL, NULL);
    object_class_property_add_str(oc, "vendor",
                                  x86_cpuid_get_vendor,
                                  x86_cpuid_set_vendor);
    object_class_property_add_str(oc, "model-id",
                                  x86_cpuid_get_model_id,
                                  x86_cpuid_set_model_id);
    object_class_property_add(oc, "tsc-frequency", "int",
                              x86_cpuid_get_tsc_freq,
                              x86_cpuid_set_tsc_freq, NULL, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: both list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_class_property_add(oc, "unavailable-features", "strList",
                              x86_cpu_get_unavailable_features,
                              NULL, NULL, NULL);

#if !defined(CONFIG_USER_ONLY)
    object_class_property_add(oc, "crash-information", "GuestPanicInformation",
                              x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
#endif

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;
        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(xcc, w, bitnr);
        }
    }
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)
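
/*
 * Illustrative usage note (editor's addition, not part of the build logic):
 * the bit properties registered by x86_cpu_register_feature_bit_props() and
 * the qdev properties in x86_cpu_properties[] are what the "-cpu" feature
 * string parsed by x86_cpu_parse_featurestr() ultimately sets.  For example,
 * a command line along the lines of
 *
 *     qemu-system-x86_64 -cpu max,pmu=on,check=on
 *
 * results in writes to the "pmu" and "check" properties of the X86CPU object
 * before realize; features the selected accelerator cannot provide are then
 * reported through mark_unavailable_features() and surface in the
 * "unavailable-features" property registered above.
 */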