/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"

#include "cpu.h"
#include "tcg/tcg-cpu.h"
#include "tcg/helper-tcg.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "sysemu/whpx.h"
#include "kvm/kvm_i386.h"
#include "sev_i386.h"

#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#include "qapi/qapi-commands-machine-target.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif

#include "disas/capstone.h"

/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF

/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

#define CACHE_LEVEL(l)        (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)


/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
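
/*
 * Worked example (illustrative only, not part of the original code): for a
 * hypothetical 64 KiB, 2-way L1 data cache with 64-byte lines and
 * lines_per_tag = 1, encode_cache_cpuid80000005() above would produce
 *
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140
 *
 * i.e. bits 31:24 hold the size in KiB, 23:16 the associativity, 15:8 the
 * lines per tag and 7:0 the line size, matching the CPUID[0x80000005]
 * register layout this helper encodes.
 */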

/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}

/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
                                       X86CPUTopoInfo *topo_info,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_threads;
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_threads = topo_info->cores_per_die * topo_info->threads_per_core;
        *eax |= (l3_threads - 1) << 14;
    } else {
        *eax |= ((topo_info->threads_per_core - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
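
/*
 * Worked example (illustrative, not from the original sources): a 512 KiB,
 * 8-way L2 with 64-byte lines and lines_per_tag = 1 would be reported in
 * CPUID[0x80000006].ECX by encode_cache_cpuid80000006() above as
 *
 *   (512 << 16) | (AMD_ENC_ASSOC(8) << 12) | (1 << 8) | 64
 *     = 0x02000000 | 0x6000 | 0x100 | 0x40
 *     = 0x02006140
 *
 * where AMD_ENC_ASSOC(8) == 0x6 is AMD's table encoding for 8-way
 * associativity.
 */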

/* Encode cache info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    X86CPUTopoIDs topo_ids;

    x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);

    *eax = cpu->apic_id;

    /*
     * CPUID_Fn8000001E_EBX [Core Identifiers] (CoreId)
     * Read-only. Reset: 0000_XXXXh.
     * See Core::X86::Cpuid::ExtApicId.
     * Core::X86::Cpuid::CoreId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits Description
     * 31:16 Reserved.
     * 15:8 ThreadsPerCore: threads per core. Read-only. Reset: XXh.
     *      The number of threads per core is ThreadsPerCore+1.
     *  7:0 CoreId: core ID. Read-only. Reset: XXh.
     *
     * NOTE: CoreId is already part of apic_id. Just use it. We can
     * use all the 8 bits to represent the core_id here.
     */
    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.core_id & 0xFF);

    /*
     * CPUID_Fn8000001E_ECX [Node Identifiers] (NodeId)
     * Read-only. Reset: 0000_0XXXh.
     * Core::X86::Cpuid::NodeId_lthree[1:0]_core[3:0]_thread[1:0];
     * Bits Description
     * 31:11 Reserved.
     * 10:8 NodesPerProcessor: Nodes per processor. Read-only. Reset: XXXb.
     *      ValidValues:
     *      Value     Description
     *      000b      1 node per processor.
     *      001b      2 nodes per processor.
     *      010b      Reserved.
     *      011b      4 nodes per processor.
     *      111b-100b Reserved.
     *  7:0 NodeId: Node ID. Read-only. Reset: XXh.
     *
     * NOTE: Hardware reserves 3 bits for the number of nodes per processor,
     * but users can create more nodes than the actual hardware can support.
     * To generalize, we use all the upper 8 bits for nodes. NodeId is a
     * combination of node and socket_id, which is already decoded in
     * apic_id. Just use it by shifting.
     */
    *ecx = ((topo_info->dies_per_pkg - 1) << 8) |
           ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF);

    *edx = 0;
}

/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
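
/*
 * Illustrative cross-check (not part of the original code): the legacy
 * CPUID[2] model above, legacy_l2_cache_cpuid2 (unified, level 2, 2 MiB,
 * 8-way, 64-byte lines), matches the [0x7D] entry of
 * cpuid2_cache_descriptors[], so cpuid2_cache_descriptor() is expected to
 * return descriptor 0x7D for it rather than CACHE_DESCRIPTOR_UNAVAILABLE.
 */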

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */

static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */
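
/*
 * Worked example (illustrative only): x86_cpu_vendor_words2str() above
 * assembles the 12-byte vendor string from the three CPUID.0 registers in
 * EBX, EDX, ECX order, least-significant byte first. For an Intel part,
 * EBX = 0x756e6547 ("Genu"), EDX = 0x49656e69 ("ineI") and
 * ECX = 0x6c65746e ("ntel") therefore yield "GenuineIntel".
 */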

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
#define TCG_14_0_ECX_FEATURES 0

typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flag names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[64];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
        } msr;
    };
    uint64_t tcg_features; /* Feature flags supported by TCG */
    uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint64_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint64_t no_autoenable_flags;
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int",
            "kvm-msi-ext-dest-id",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    /*
     * .feat_names are commented out for Hyper-V enlightenments because we
     * don't want to have two different ways for enabling them on QEMU command
     * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
     * enabling several feature bits simultaneously, exposing these bits
     * individually may just confuse guests.
     */
    [FEAT_HYPERV_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
    },
    [FEAT_HYPERV_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
    },
    [FEAT_HYPERV_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
    },
    [FEAT_HV_RECOMM_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_recommend_pv_as_switch */,
            NULL /* hv_recommend_pv_tlbflush_local */,
            NULL /* hv_recommend_pv_tlbflush_remote */,
            NULL /* hv_recommend_msr_apic_access */,
            NULL /* hv_recommend_msr_reset */,
            NULL /* hv_recommend_relaxed_timing */,
            NULL /* hv_recommend_dma_remapping */,
            NULL /* hv_recommend_int_remapping */,
            NULL /* hv_recommend_x2apic_msrs */,
            NULL /* hv_recommend_autoeoi_deprecation */,
            NULL /* hv_recommend_pv_ipi */,
            NULL /* hv_recommend_ex_hypercalls */,
            NULL /* hv_hypervisor_is_nested */,
            NULL /* hv_recommend_int_mbec */,
            NULL /* hv_recommend_evmcs */,
            NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
    },
    [FEAT_HV_NESTED_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
    },
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, "movdiri",
            "movdir64b", NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            "fsrm", NULL, NULL, NULL,
            "avx512-vp2intersect", NULL, "md-clear", NULL,
            NULL, NULL, "serialize", NULL,
            "tsx-ldtrk", NULL, NULL /* pconfig */, NULL,
            NULL, NULL, NULL, "avx512-fp16",
            NULL, NULL, "spec-ctrl", "stibp",
            NULL, "arch-capabilities", "core-capability", "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_7_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "avx512-bf16", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_7_1_EAX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "clzero", NULL, "xsaveerptr", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, NULL, "amd-stibp",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /*Below are MSR exposed features*/
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
            "taa-no", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
        },
    },
    [FEAT_PERF_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "full-width-write", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_PERF_CAPABILITIES,
        },
    },

    [FEAT_VMX_PROCBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
            NULL, NULL, NULL, "vmx-hlt-exit",
            NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
            "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
            "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
            "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending",
            "vmx-movdr-exit",
            "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
            "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit",
            "vmx-secondary-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
        }
    },

    [FEAT_VMX_SECONDARY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
            "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit",
            "vmx-unrestricted-guest",
            "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple",
            "vmx-rdrand-exit",
            "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs",
            "vmx-encls-exit",
            "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
            "vmx-xsaves", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_PROCBASED_CTLS2,
        }
    },

    [FEAT_VMX_PINBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
            NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        }
    },

    [FEAT_VMX_EXIT_CTLS] = {
        .type = MSR_FEATURE_WORD,
        /*
         * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
         * the LM CPUID bit.
         */
        .feat_names = {
            NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
            "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
            NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
            "vmx-exit-save-efer", "vmx-exit-load-efer",
            "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
            NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
        }
    },

    [FEAT_VMX_ENTRY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-entry-noload-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-entry-ia32e-mode", NULL, NULL,
            NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat",
            "vmx-entry-load-efer",
            "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
        }
    },

    [FEAT_VMX_MISC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
            "vmx-activity-wait-sipi", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_MISC,
        }
    },

    [FEAT_VMX_EPT_VPID_CAPS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-ept-execonly", NULL, NULL, NULL,
            NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
            "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
            NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid-single-addr", "vmx-invept-single-context",
            "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_EPT_VPID_CAP,
        }
    },

    [FEAT_VMX_BASIC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [54] = "vmx-ins-outs",
            [55] = "vmx-true-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_BASIC,
        },
        /*
         * Just to be safe - we don't support setting the MSEG version field.
         */
        .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
    },

    [FEAT_VMX_VMFUNC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [0] = "vmx-eptp-switching",
        },
        .msr = {
            .index = MSR_IA32_VMX_VMFUNC,
        }
    },

    [FEAT_14_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, "intel-pt-lip",
        },
        .cpuid = {
            .eax = 0x14,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_14_0_ECX_FEATURES,
    },

};

typedef struct FeatureMask {
    FeatureWord index;
    uint64_t mask;
} FeatureMask;

typedef struct FeatureDep {
    FeatureMask from, to;
} FeatureDep;

static FeatureDep feature_dependencies[] = {
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
        .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
        .to = { FEAT_CORE_CAPABILITY, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
        .to = { FEAT_PERF_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_MISC, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_BASIC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
        .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
    },
    {
        .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
        .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT },
        .to = { FEAT_14_0_ECX, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
        .to = { FEAT_VMX_VMFUNC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM },
        .to = { FEAT_SVM, ~0ull },
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}

static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}
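
/*
 * Illustrative note (not part of the original code): with only the x87, SSE
 * and AVX components enabled in FEAT_XSAVE_COMP_LO (mask 0x7) and
 * FEAT_XSAVE_COMP_HI == 0, x86_cpu_xsave_components() returns 0x7 and
 * xsave_area_size(0x7) evaluates to
 *   offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX)
 * i.e. the legacy region plus the XSAVE header plus the AVX state, the
 * largest end offset among the enabled components.
 */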
/* Return name of 32-bit register, from a R_* constant */
static const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i++) {
        uint64_t f = 1ULL << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
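
/*
 * Worked example (illustrative only): for CPUID.1.EAX = 0x000306C3,
 * host_vendor_fms() above computes
 *   family   = ((eax >> 8) & 0xF) + ((eax >> 20) & 0xFF)   = 6 + 0    = 6
 *   model    = ((eax >> 4) & 0xF) | ((eax & 0xF0000) >> 12) = 0xC | 0x30 = 0x3C
 *   stepping = eax & 0xF                                    = 3
 * i.e. the extended family and extended model fields are folded into the
 * reported family and model values.
 */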

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = x86_cpu_type_name(cpu_model);
    return object_class_by_name(typename);
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

typedef struct PropValue {
    const char *prop, *value;
} PropValue;

typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    const char *note;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
    const char *deprecation_note;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    const char *note;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    return def->versions ?: default_version_list;
}

static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

static CPUCaches epyc_rome_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 16 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 16384,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
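
/*
 * Illustrative consistency check (not part of the original code): the cache
 * definitions above satisfy the invariant asserted by encode_cache_cpuid4()
 * and encode_cache_cpuid8000001d(),
 *   size == line_size * associativity * partitions * sets.
 * For example, the EPYC L3 is 64 * 16 * 1 * 8192 = 8 MiB and the EPYC-Rome
 * L3 is 64 * 16 * 1 * 16384 = 16 MiB.
 */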
Conceal VM entries from PT 1816 * Enable ENCLS exiting 1817 * Mode-based execute control (XS/XU) 1818 * TSC scaling (Skylake Server and newer) 1819 * GPA translation for PT (IceLake and newer) 1820 * User wait and pause 1821 * ENCLV exiting 1822 * Load IA32_RTIT_CTL 1823 * Clear IA32_RTIT_CTL 1824 * Advanced VM-exit information for EPT violations 1825 * Sub-page write permissions 1826 * PT in VMX operation 1827 */ 1828 1829 static X86CPUDefinition builtin_x86_defs[] = { 1830 { 1831 .name = "qemu64", 1832 .level = 0xd, 1833 .vendor = CPUID_VENDOR_AMD, 1834 .family = 6, 1835 .model = 6, 1836 .stepping = 3, 1837 .features[FEAT_1_EDX] = 1838 PPRO_FEATURES | 1839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1840 CPUID_PSE36, 1841 .features[FEAT_1_ECX] = 1842 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1843 .features[FEAT_8000_0001_EDX] = 1844 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1845 .features[FEAT_8000_0001_ECX] = 1846 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1847 .xlevel = 0x8000000A, 1848 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1849 }, 1850 { 1851 .name = "phenom", 1852 .level = 5, 1853 .vendor = CPUID_VENDOR_AMD, 1854 .family = 16, 1855 .model = 2, 1856 .stepping = 3, 1857 /* Missing: CPUID_HT */ 1858 .features[FEAT_1_EDX] = 1859 PPRO_FEATURES | 1860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1861 CPUID_PSE36 | CPUID_VME, 1862 .features[FEAT_1_ECX] = 1863 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1864 CPUID_EXT_POPCNT, 1865 .features[FEAT_8000_0001_EDX] = 1866 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1867 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1868 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1869 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1870 CPUID_EXT3_CR8LEG, 1871 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1872 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1873 .features[FEAT_8000_0001_ECX] = 1874 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1875 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1876 /* Missing: CPUID_SVM_LBRV */ 1877 .features[FEAT_SVM] = 1878 CPUID_SVM_NPT, 1879 .xlevel = 0x8000001A, 1880 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1881 }, 1882 { 1883 .name = "core2duo", 1884 .level = 10, 1885 .vendor = CPUID_VENDOR_INTEL, 1886 .family = 6, 1887 .model = 15, 1888 .stepping = 11, 1889 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1890 .features[FEAT_1_EDX] = 1891 PPRO_FEATURES | 1892 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1893 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1894 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1895 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1896 .features[FEAT_1_ECX] = 1897 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1898 CPUID_EXT_CX16, 1899 .features[FEAT_8000_0001_EDX] = 1900 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1901 .features[FEAT_8000_0001_ECX] = 1902 CPUID_EXT3_LAHF_LM, 1903 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1904 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1905 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1906 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1907 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1908 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1909 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1910 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1911 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1912 VMX_CPU_BASED_RDPMC_EXITING |
VMX_CPU_BASED_RDTSC_EXITING | 1913 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1914 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1915 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1916 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1917 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1918 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1919 .features[FEAT_VMX_SECONDARY_CTLS] = 1920 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1921 .xlevel = 0x80000008, 1922 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1923 }, 1924 { 1925 .name = "kvm64", 1926 .level = 0xd, 1927 .vendor = CPUID_VENDOR_INTEL, 1928 .family = 15, 1929 .model = 6, 1930 .stepping = 1, 1931 /* Missing: CPUID_HT */ 1932 .features[FEAT_1_EDX] = 1933 PPRO_FEATURES | CPUID_VME | 1934 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1935 CPUID_PSE36, 1936 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1937 .features[FEAT_1_ECX] = 1938 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1939 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1940 .features[FEAT_8000_0001_EDX] = 1941 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1942 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1943 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1944 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1945 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1946 .features[FEAT_8000_0001_ECX] = 1947 0, 1948 /* VMX features from Cedar Mill/Prescott */ 1949 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1950 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1951 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1952 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1953 VMX_PIN_BASED_NMI_EXITING, 1954 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1955 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1956 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1957 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1958 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1959 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1960 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1961 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1962 .xlevel = 0x80000008, 1963 .model_id = "Common KVM processor" 1964 }, 1965 { 1966 .name = "qemu32", 1967 .level = 4, 1968 .vendor = CPUID_VENDOR_INTEL, 1969 .family = 6, 1970 .model = 6, 1971 .stepping = 3, 1972 .features[FEAT_1_EDX] = 1973 PPRO_FEATURES, 1974 .features[FEAT_1_ECX] = 1975 CPUID_EXT_SSE3, 1976 .xlevel = 0x80000004, 1977 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1978 }, 1979 { 1980 .name = "kvm32", 1981 .level = 5, 1982 .vendor = CPUID_VENDOR_INTEL, 1983 .family = 15, 1984 .model = 6, 1985 .stepping = 1, 1986 .features[FEAT_1_EDX] = 1987 PPRO_FEATURES | CPUID_VME | 1988 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1989 .features[FEAT_1_ECX] = 1990 CPUID_EXT_SSE3, 1991 .features[FEAT_8000_0001_ECX] = 1992 0, 1993 /* VMX features from Yonah */ 1994 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1995 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1996 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1997 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1998 VMX_PIN_BASED_NMI_EXITING, 1999 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2000 
VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2001 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2002 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2003 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2004 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2005 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2006 .xlevel = 0x80000008, 2007 .model_id = "Common 32-bit KVM processor" 2008 }, 2009 { 2010 .name = "coreduo", 2011 .level = 10, 2012 .vendor = CPUID_VENDOR_INTEL, 2013 .family = 6, 2014 .model = 14, 2015 .stepping = 8, 2016 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2017 .features[FEAT_1_EDX] = 2018 PPRO_FEATURES | CPUID_VME | 2019 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2020 CPUID_SS, 2021 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2022 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2023 .features[FEAT_1_ECX] = 2024 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2025 .features[FEAT_8000_0001_EDX] = 2026 CPUID_EXT2_NX, 2027 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2028 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2029 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2030 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2031 VMX_PIN_BASED_NMI_EXITING, 2032 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2033 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2034 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2035 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2036 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2037 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2038 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2039 .xlevel = 0x80000008, 2040 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2041 }, 2042 { 2043 .name = "486", 2044 .level = 1, 2045 .vendor = CPUID_VENDOR_INTEL, 2046 .family = 4, 2047 .model = 8, 2048 .stepping = 0, 2049 .features[FEAT_1_EDX] = 2050 I486_FEATURES, 2051 .xlevel = 0, 2052 .model_id = "", 2053 }, 2054 { 2055 .name = "pentium", 2056 .level = 1, 2057 .vendor = CPUID_VENDOR_INTEL, 2058 .family = 5, 2059 .model = 4, 2060 .stepping = 3, 2061 .features[FEAT_1_EDX] = 2062 PENTIUM_FEATURES, 2063 .xlevel = 0, 2064 .model_id = "", 2065 }, 2066 { 2067 .name = "pentium2", 2068 .level = 2, 2069 .vendor = CPUID_VENDOR_INTEL, 2070 .family = 6, 2071 .model = 5, 2072 .stepping = 2, 2073 .features[FEAT_1_EDX] = 2074 PENTIUM2_FEATURES, 2075 .xlevel = 0, 2076 .model_id = "", 2077 }, 2078 { 2079 .name = "pentium3", 2080 .level = 3, 2081 .vendor = CPUID_VENDOR_INTEL, 2082 .family = 6, 2083 .model = 7, 2084 .stepping = 3, 2085 .features[FEAT_1_EDX] = 2086 PENTIUM3_FEATURES, 2087 .xlevel = 0, 2088 .model_id = "", 2089 }, 2090 { 2091 .name = "athlon", 2092 .level = 2, 2093 .vendor = CPUID_VENDOR_AMD, 2094 .family = 6, 2095 .model = 2, 2096 .stepping = 3, 2097 .features[FEAT_1_EDX] = 2098 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2099 CPUID_MCA, 2100 .features[FEAT_8000_0001_EDX] = 2101 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2102 .xlevel = 0x80000008, 2103 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2104 }, 2105 { 2106 .name = "n270", 2107 .level = 10, 2108 .vendor = CPUID_VENDOR_INTEL, 2109 .family = 6, 2110 .model = 28, 2111 .stepping = 2, 2112 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2113 .features[FEAT_1_EDX] = 2114 PPRO_FEATURES | 2115 
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2116 CPUID_ACPI | CPUID_SS, 2117 /* Some CPUs got no CPUID_SEP */ 2118 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2119 * CPUID_EXT_XTPR */ 2120 .features[FEAT_1_ECX] = 2121 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2122 CPUID_EXT_MOVBE, 2123 .features[FEAT_8000_0001_EDX] = 2124 CPUID_EXT2_NX, 2125 .features[FEAT_8000_0001_ECX] = 2126 CPUID_EXT3_LAHF_LM, 2127 .xlevel = 0x80000008, 2128 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2129 }, 2130 { 2131 .name = "Conroe", 2132 .level = 10, 2133 .vendor = CPUID_VENDOR_INTEL, 2134 .family = 6, 2135 .model = 15, 2136 .stepping = 3, 2137 .features[FEAT_1_EDX] = 2138 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2142 CPUID_DE | CPUID_FP87, 2143 .features[FEAT_1_ECX] = 2144 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2145 .features[FEAT_8000_0001_EDX] = 2146 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2147 .features[FEAT_8000_0001_ECX] = 2148 CPUID_EXT3_LAHF_LM, 2149 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2150 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2151 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2152 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2153 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2154 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2155 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2156 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2157 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2158 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2159 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2160 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2161 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2162 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2163 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2164 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2165 .features[FEAT_VMX_SECONDARY_CTLS] = 2166 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2167 .xlevel = 0x80000008, 2168 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2169 }, 2170 { 2171 .name = "Penryn", 2172 .level = 10, 2173 .vendor = CPUID_VENDOR_INTEL, 2174 .family = 6, 2175 .model = 23, 2176 .stepping = 3, 2177 .features[FEAT_1_EDX] = 2178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2182 CPUID_DE | CPUID_FP87, 2183 .features[FEAT_1_ECX] = 2184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2185 CPUID_EXT_SSE3, 2186 .features[FEAT_8000_0001_EDX] = 2187 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2188 .features[FEAT_8000_0001_ECX] = 2189 CPUID_EXT3_LAHF_LM, 2190 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2191 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2192 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2193 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2194 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2195 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2196 
.features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2197 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2198 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2199 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2200 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2201 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2202 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2203 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2204 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2205 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2206 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2207 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2208 .features[FEAT_VMX_SECONDARY_CTLS] = 2209 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2210 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2211 .xlevel = 0x80000008, 2212 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2213 }, 2214 { 2215 .name = "Nehalem", 2216 .level = 11, 2217 .vendor = CPUID_VENDOR_INTEL, 2218 .family = 6, 2219 .model = 26, 2220 .stepping = 3, 2221 .features[FEAT_1_EDX] = 2222 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2223 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2224 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2225 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2226 CPUID_DE | CPUID_FP87, 2227 .features[FEAT_1_ECX] = 2228 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2229 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2230 .features[FEAT_8000_0001_EDX] = 2231 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2232 .features[FEAT_8000_0001_ECX] = 2233 CPUID_EXT3_LAHF_LM, 2234 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2235 MSR_VMX_BASIC_TRUE_CTLS, 2236 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2237 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2238 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2239 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2240 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2241 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2242 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2243 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2244 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2245 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2246 .features[FEAT_VMX_EXIT_CTLS] = 2247 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2248 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2249 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2250 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2251 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2252 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2253 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2254 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2255 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2256 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2257 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2258 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2259 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2260 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2261 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2262 VMX_CPU_BASED_UNCOND_IO_EXITING | 
VMX_CPU_BASED_USE_IO_BITMAPS | 2263 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2264 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2265 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2266 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2267 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2268 .features[FEAT_VMX_SECONDARY_CTLS] = 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2270 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2271 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2272 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2273 VMX_SECONDARY_EXEC_ENABLE_VPID, 2274 .xlevel = 0x80000008, 2275 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2276 .versions = (X86CPUVersionDefinition[]) { 2277 { .version = 1 }, 2278 { 2279 .version = 2, 2280 .alias = "Nehalem-IBRS", 2281 .props = (PropValue[]) { 2282 { "spec-ctrl", "on" }, 2283 { "model-id", 2284 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { /* end of list */ } 2289 } 2290 }, 2291 { 2292 .name = "Westmere", 2293 .level = 11, 2294 .vendor = CPUID_VENDOR_INTEL, 2295 .family = 6, 2296 .model = 44, 2297 .stepping = 1, 2298 .features[FEAT_1_EDX] = 2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2303 CPUID_DE | CPUID_FP87, 2304 .features[FEAT_1_ECX] = 2305 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2308 .features[FEAT_8000_0001_EDX] = 2309 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2310 .features[FEAT_8000_0001_ECX] = 2311 CPUID_EXT3_LAHF_LM, 2312 .features[FEAT_6_EAX] = 2313 CPUID_6_EAX_ARAT, 2314 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2315 MSR_VMX_BASIC_TRUE_CTLS, 2316 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2317 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2318 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2319 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2320 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2321 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2322 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2323 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2324 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2325 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2326 .features[FEAT_VMX_EXIT_CTLS] = 2327 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2328 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2329 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2330 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2331 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2332 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2333 MSR_VMX_MISC_STORE_LMA, 2334 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2335 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2336 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2337 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2338 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2339 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2340 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2341 
VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2342 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2343 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2344 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2345 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2346 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2347 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2348 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2349 .features[FEAT_VMX_SECONDARY_CTLS] = 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2351 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2352 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2353 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2354 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2355 .xlevel = 0x80000008, 2356 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2357 .versions = (X86CPUVersionDefinition[]) { 2358 { .version = 1 }, 2359 { 2360 .version = 2, 2361 .alias = "Westmere-IBRS", 2362 .props = (PropValue[]) { 2363 { "spec-ctrl", "on" }, 2364 { "model-id", 2365 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { /* end of list */ } 2370 } 2371 }, 2372 { 2373 .name = "SandyBridge", 2374 .level = 0xd, 2375 .vendor = CPUID_VENDOR_INTEL, 2376 .family = 6, 2377 .model = 42, 2378 .stepping = 1, 2379 .features[FEAT_1_EDX] = 2380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2384 CPUID_DE | CPUID_FP87, 2385 .features[FEAT_1_ECX] = 2386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2387 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2388 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2389 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2390 CPUID_EXT_SSE3, 2391 .features[FEAT_8000_0001_EDX] = 2392 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2393 CPUID_EXT2_SYSCALL, 2394 .features[FEAT_8000_0001_ECX] = 2395 CPUID_EXT3_LAHF_LM, 2396 .features[FEAT_XSAVE] = 2397 CPUID_XSAVE_XSAVEOPT, 2398 .features[FEAT_6_EAX] = 2399 CPUID_6_EAX_ARAT, 2400 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2401 MSR_VMX_BASIC_TRUE_CTLS, 2402 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2403 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2404 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2405 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2406 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2407 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2408 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2409 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2410 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2411 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2412 .features[FEAT_VMX_EXIT_CTLS] = 2413 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2414 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2415 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2416 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2417 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2418 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2419 MSR_VMX_MISC_STORE_LMA, 2420 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2421 
VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2422 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2423 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2424 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2425 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2426 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2427 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2428 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2429 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2430 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2431 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2432 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2433 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2434 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2435 .features[FEAT_VMX_SECONDARY_CTLS] = 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2437 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2438 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2439 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2440 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2441 .xlevel = 0x80000008, 2442 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2443 .versions = (X86CPUVersionDefinition[]) { 2444 { .version = 1 }, 2445 { 2446 .version = 2, 2447 .alias = "SandyBridge-IBRS", 2448 .props = (PropValue[]) { 2449 { "spec-ctrl", "on" }, 2450 { "model-id", 2451 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { /* end of list */ } 2456 } 2457 }, 2458 { 2459 .name = "IvyBridge", 2460 .level = 0xd, 2461 .vendor = CPUID_VENDOR_INTEL, 2462 .family = 6, 2463 .model = 58, 2464 .stepping = 9, 2465 .features[FEAT_1_EDX] = 2466 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2467 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2468 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2469 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2470 CPUID_DE | CPUID_FP87, 2471 .features[FEAT_1_ECX] = 2472 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2473 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2474 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2475 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2476 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2477 .features[FEAT_7_0_EBX] = 2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2479 CPUID_7_0_EBX_ERMS, 2480 .features[FEAT_8000_0001_EDX] = 2481 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2482 CPUID_EXT2_SYSCALL, 2483 .features[FEAT_8000_0001_ECX] = 2484 CPUID_EXT3_LAHF_LM, 2485 .features[FEAT_XSAVE] = 2486 CPUID_XSAVE_XSAVEOPT, 2487 .features[FEAT_6_EAX] = 2488 CPUID_6_EAX_ARAT, 2489 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2490 MSR_VMX_BASIC_TRUE_CTLS, 2491 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2492 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2493 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2494 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2495 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2496 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2497 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2498 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2499 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2500 
MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2501 .features[FEAT_VMX_EXIT_CTLS] = 2502 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2503 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2504 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2505 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2506 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2507 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2508 MSR_VMX_MISC_STORE_LMA, 2509 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2510 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2511 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2512 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2513 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2514 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2515 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2516 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2517 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2518 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2519 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2520 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2521 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2522 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2523 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2524 .features[FEAT_VMX_SECONDARY_CTLS] = 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2526 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2527 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2528 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2529 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2530 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2531 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2532 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2533 .xlevel = 0x80000008, 2534 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2535 .versions = (X86CPUVersionDefinition[]) { 2536 { .version = 1 }, 2537 { 2538 .version = 2, 2539 .alias = "IvyBridge-IBRS", 2540 .props = (PropValue[]) { 2541 { "spec-ctrl", "on" }, 2542 { "model-id", 2543 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { /* end of list */ } 2548 } 2549 }, 2550 { 2551 .name = "Haswell", 2552 .level = 0xd, 2553 .vendor = CPUID_VENDOR_INTEL, 2554 .family = 6, 2555 .model = 60, 2556 .stepping = 4, 2557 .features[FEAT_1_EDX] = 2558 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2559 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2560 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2561 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2562 CPUID_DE | CPUID_FP87, 2563 .features[FEAT_1_ECX] = 2564 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2565 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2566 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2567 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2568 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2569 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2570 .features[FEAT_8000_0001_EDX] = 2571 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2572 CPUID_EXT2_SYSCALL, 2573 .features[FEAT_8000_0001_ECX] = 2574 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2575 .features[FEAT_7_0_EBX] = 2576 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2577 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2578 
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2579 CPUID_7_0_EBX_RTM, 2580 .features[FEAT_XSAVE] = 2581 CPUID_XSAVE_XSAVEOPT, 2582 .features[FEAT_6_EAX] = 2583 CPUID_6_EAX_ARAT, 2584 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2585 MSR_VMX_BASIC_TRUE_CTLS, 2586 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2587 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2588 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2589 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2590 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2591 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2592 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2593 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2594 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2595 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2596 .features[FEAT_VMX_EXIT_CTLS] = 2597 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2598 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2599 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2600 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2601 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2602 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2603 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2604 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2605 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2606 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2607 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2608 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2609 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2610 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2611 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2612 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2613 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2614 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2615 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2616 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2617 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2618 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2619 .features[FEAT_VMX_SECONDARY_CTLS] = 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2621 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2622 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2623 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2624 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2625 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2626 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2627 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2628 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2629 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2630 .xlevel = 0x80000008, 2631 .model_id = "Intel Core Processor (Haswell)", 2632 .versions = (X86CPUVersionDefinition[]) { 2633 { .version = 1 }, 2634 { 2635 .version = 2, 2636 .alias = "Haswell-noTSX", 2637 .props = (PropValue[]) { 2638 { "hle", "off" }, 2639 { "rtm", "off" }, 2640 { "stepping", "1" }, 2641 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2642 { /* end of list */ } 2643 }, 2644 }, 2645 { 2646 .version = 3, 2647 .alias = "Haswell-IBRS", 2648 .props = (PropValue[]) { 2649 /* 
Restore TSX features removed by -v2 above */ 2650 { "hle", "on" }, 2651 { "rtm", "on" }, 2652 /* 2653 * Haswell and Haswell-IBRS had stepping=4 in 2654 * QEMU 4.0 and older 2655 */ 2656 { "stepping", "4" }, 2657 { "spec-ctrl", "on" }, 2658 { "model-id", 2659 "Intel Core Processor (Haswell, IBRS)" }, 2660 { /* end of list */ } 2661 } 2662 }, 2663 { 2664 .version = 4, 2665 .alias = "Haswell-noTSX-IBRS", 2666 .props = (PropValue[]) { 2667 { "hle", "off" }, 2668 { "rtm", "off" }, 2669 /* spec-ctrl was already enabled by -v3 above */ 2670 { "stepping", "1" }, 2671 { "model-id", 2672 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { /* end of list */ } 2677 } 2678 }, 2679 { 2680 .name = "Broadwell", 2681 .level = 0xd, 2682 .vendor = CPUID_VENDOR_INTEL, 2683 .family = 6, 2684 .model = 61, 2685 .stepping = 2, 2686 .features[FEAT_1_EDX] = 2687 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2688 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2689 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2690 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2691 CPUID_DE | CPUID_FP87, 2692 .features[FEAT_1_ECX] = 2693 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2694 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2695 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2696 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2697 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2698 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2699 .features[FEAT_8000_0001_EDX] = 2700 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2701 CPUID_EXT2_SYSCALL, 2702 .features[FEAT_8000_0001_ECX] = 2703 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2704 .features[FEAT_7_0_EBX] = 2705 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2706 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2707 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2708 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2709 CPUID_7_0_EBX_SMAP, 2710 .features[FEAT_XSAVE] = 2711 CPUID_XSAVE_XSAVEOPT, 2712 .features[FEAT_6_EAX] = 2713 CPUID_6_EAX_ARAT, 2714 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2715 MSR_VMX_BASIC_TRUE_CTLS, 2716 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2717 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2718 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2719 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2720 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2721 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2722 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2723 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2724 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2725 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2726 .features[FEAT_VMX_EXIT_CTLS] = 2727 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2728 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2729 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2730 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2731 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2732 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2733 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2734 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2735 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2736 
VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2737 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2738 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2739 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2740 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2741 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2742 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2743 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2744 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2745 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2746 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2747 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2748 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2749 .features[FEAT_VMX_SECONDARY_CTLS] = 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2751 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2752 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2753 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2754 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2755 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2756 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2757 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2758 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2759 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2760 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2761 .xlevel = 0x80000008, 2762 .model_id = "Intel Core Processor (Broadwell)", 2763 .versions = (X86CPUVersionDefinition[]) { 2764 { .version = 1 }, 2765 { 2766 .version = 2, 2767 .alias = "Broadwell-noTSX", 2768 .props = (PropValue[]) { 2769 { "hle", "off" }, 2770 { "rtm", "off" }, 2771 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2772 { /* end of list */ } 2773 }, 2774 }, 2775 { 2776 .version = 3, 2777 .alias = "Broadwell-IBRS", 2778 .props = (PropValue[]) { 2779 /* Restore TSX features removed by -v2 above */ 2780 { "hle", "on" }, 2781 { "rtm", "on" }, 2782 { "spec-ctrl", "on" }, 2783 { "model-id", 2784 "Intel Core Processor (Broadwell, IBRS)" }, 2785 { /* end of list */ } 2786 } 2787 }, 2788 { 2789 .version = 4, 2790 .alias = "Broadwell-noTSX-IBRS", 2791 .props = (PropValue[]) { 2792 { "hle", "off" }, 2793 { "rtm", "off" }, 2794 /* spec-ctrl was already enabled by -v3 above */ 2795 { "model-id", 2796 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { /* end of list */ } 2801 } 2802 }, 2803 { 2804 .name = "Skylake-Client", 2805 .level = 0xd, 2806 .vendor = CPUID_VENDOR_INTEL, 2807 .family = 6, 2808 .model = 94, 2809 .stepping = 3, 2810 .features[FEAT_1_EDX] = 2811 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2812 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2813 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2814 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2815 CPUID_DE | CPUID_FP87, 2816 .features[FEAT_1_ECX] = 2817 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2818 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2819 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2820 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2821 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2822 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2823 .features[FEAT_8000_0001_EDX] = 2824 CPUID_EXT2_LM | 
CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2825 CPUID_EXT2_SYSCALL, 2826 .features[FEAT_8000_0001_ECX] = 2827 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2828 .features[FEAT_7_0_EBX] = 2829 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2830 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2831 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2832 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2833 CPUID_7_0_EBX_SMAP, 2834 /* Missing: XSAVES (not supported by some Linux versions, 2835 * including v4.1 to v4.12). 2836 * KVM doesn't yet expose any XSAVES state save component, 2837 * and the only one defined in Skylake (processor tracing) 2838 * probably will block migration anyway. 2839 */ 2840 .features[FEAT_XSAVE] = 2841 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2842 CPUID_XSAVE_XGETBV1, 2843 .features[FEAT_6_EAX] = 2844 CPUID_6_EAX_ARAT, 2845 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2846 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2847 MSR_VMX_BASIC_TRUE_CTLS, 2848 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2849 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2850 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2851 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2852 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2853 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2854 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2855 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2856 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2857 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2858 .features[FEAT_VMX_EXIT_CTLS] = 2859 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2860 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2861 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2862 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2863 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2864 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2865 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2866 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2867 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2868 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2869 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2870 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2871 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2872 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2873 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2874 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2875 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2876 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2877 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2878 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2879 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2880 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2881 .features[FEAT_VMX_SECONDARY_CTLS] = 2882 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2883 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2884 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2885 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2886 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2887 
VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2888 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2889 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2890 .xlevel = 0x80000008, 2891 .model_id = "Intel Core Processor (Skylake)", 2892 .versions = (X86CPUVersionDefinition[]) { 2893 { .version = 1 }, 2894 { 2895 .version = 2, 2896 .alias = "Skylake-Client-IBRS", 2897 .props = (PropValue[]) { 2898 { "spec-ctrl", "on" }, 2899 { "model-id", 2900 "Intel Core Processor (Skylake, IBRS)" }, 2901 { /* end of list */ } 2902 } 2903 }, 2904 { 2905 .version = 3, 2906 .alias = "Skylake-Client-noTSX-IBRS", 2907 .props = (PropValue[]) { 2908 { "hle", "off" }, 2909 { "rtm", "off" }, 2910 { "model-id", 2911 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { /* end of list */ } 2916 } 2917 }, 2918 { 2919 .name = "Skylake-Server", 2920 .level = 0xd, 2921 .vendor = CPUID_VENDOR_INTEL, 2922 .family = 6, 2923 .model = 85, 2924 .stepping = 4, 2925 .features[FEAT_1_EDX] = 2926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2930 CPUID_DE | CPUID_FP87, 2931 .features[FEAT_1_ECX] = 2932 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2933 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2934 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2935 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2936 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2937 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2938 .features[FEAT_8000_0001_EDX] = 2939 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2940 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2941 .features[FEAT_8000_0001_ECX] = 2942 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2943 .features[FEAT_7_0_EBX] = 2944 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2945 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2946 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2947 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2948 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2949 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2950 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2951 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2952 .features[FEAT_7_0_ECX] = 2953 CPUID_7_0_ECX_PKU, 2954 /* Missing: XSAVES (not supported by some Linux versions, 2955 * including v4.1 to v4.12). 2956 * KVM doesn't yet expose any XSAVES state save component, 2957 * and the only one defined in Skylake (processor tracing) 2958 * probably will block migration anyway. 
2959 */ 2960 .features[FEAT_XSAVE] = 2961 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2962 CPUID_XSAVE_XGETBV1, 2963 .features[FEAT_6_EAX] = 2964 CPUID_6_EAX_ARAT, 2965 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2966 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2967 MSR_VMX_BASIC_TRUE_CTLS, 2968 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2969 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2970 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2971 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2972 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2973 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2974 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2975 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2976 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2977 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2978 .features[FEAT_VMX_EXIT_CTLS] = 2979 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2980 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2981 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2982 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2983 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2984 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2985 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2986 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2987 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2988 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2989 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2990 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2991 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2992 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2993 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2994 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2995 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2996 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2997 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2998 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2999 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3000 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3001 .features[FEAT_VMX_SECONDARY_CTLS] = 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3003 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3004 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3005 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3006 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3007 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3008 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3009 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3010 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3011 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3012 .xlevel = 0x80000008, 3013 .model_id = "Intel Xeon Processor (Skylake)", 3014 .versions = (X86CPUVersionDefinition[]) { 3015 { .version = 1 }, 3016 { 3017 .version = 2, 3018 .alias = "Skylake-Server-IBRS", 3019 .props = (PropValue[]) { 3020 /* clflushopt was not added to Skylake-Server-IBRS */ 3021 /* TODO: add -v3 including clflushopt */ 3022 { "clflushopt", "off" }, 3023 { "spec-ctrl", "on" }, 3024 { "model-id", 3025 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3026 { /* end of list */ } 3027 } 3028 }, 3029 { 3030 .version = 3, 3031 .alias = "Skylake-Server-noTSX-IBRS", 3032 .props = (PropValue[]) { 3033 { "hle", "off" }, 3034 { "rtm", "off" }, 3035 { "model-id", 3036 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .version = 4, 3042 .props = (PropValue[]) { 3043 { "vmx-eptp-switching", "on" }, 3044 { /* end of list */ } 3045 } 3046 }, 3047 { /* end of list */ } 3048 } 3049 }, 3050 { 3051 .name = "Cascadelake-Server", 3052 .level = 0xd, 3053 .vendor = CPUID_VENDOR_INTEL, 3054 .family = 6, 3055 .model = 85, 3056 .stepping = 6, 3057 .features[FEAT_1_EDX] = 3058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3062 CPUID_DE | CPUID_FP87, 3063 .features[FEAT_1_ECX] = 3064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3065 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3066 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3067 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3068 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3069 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3070 .features[FEAT_8000_0001_EDX] = 3071 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3072 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3073 .features[FEAT_8000_0001_ECX] = 3074 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3075 .features[FEAT_7_0_EBX] = 3076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3077 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3078 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3079 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3080 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3081 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3082 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3083 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3084 .features[FEAT_7_0_ECX] = 3085 CPUID_7_0_ECX_PKU | 3086 CPUID_7_0_ECX_AVX512VNNI, 3087 .features[FEAT_7_0_EDX] = 3088 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3089 /* Missing: XSAVES (not supported by some Linux versions, 3090 * including v4.1 to v4.12). 3091 * KVM doesn't yet expose any XSAVES state save component, 3092 * and the only one defined in Skylake (processor tracing) 3093 * probably will block migration anyway. 
3094 */ 3095 .features[FEAT_XSAVE] = 3096 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3097 CPUID_XSAVE_XGETBV1, 3098 .features[FEAT_6_EAX] = 3099 CPUID_6_EAX_ARAT, 3100 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3101 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3102 MSR_VMX_BASIC_TRUE_CTLS, 3103 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3104 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3105 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3106 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3107 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3108 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3109 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3110 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3111 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3112 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3113 .features[FEAT_VMX_EXIT_CTLS] = 3114 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3115 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3116 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3117 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3118 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3119 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3120 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3121 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3122 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3123 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3124 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3125 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3126 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3127 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3128 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3129 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3130 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3131 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3132 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3133 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3134 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3135 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3136 .features[FEAT_VMX_SECONDARY_CTLS] = 3137 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3138 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3139 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3140 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3141 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3142 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3143 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3144 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3145 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3146 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3147 .xlevel = 0x80000008, 3148 .model_id = "Intel Xeon Processor (Cascadelake)", 3149 .versions = (X86CPUVersionDefinition[]) { 3150 { .version = 1 }, 3151 { .version = 2, 3152 .note = "ARCH_CAPABILITIES", 3153 .props = (PropValue[]) { 3154 { "arch-capabilities", "on" }, 3155 { "rdctl-no", "on" }, 3156 { "ibrs-all", "on" }, 3157 { "skip-l1dfl-vmentry", "on" }, 3158 { "mds-no", "on" }, 3159 { /* end of list */ } 3160 }, 3161 }, 3162 { .version = 
3, 3163 .alias = "Cascadelake-Server-noTSX", 3164 .note = "ARCH_CAPABILITIES, no TSX", 3165 .props = (PropValue[]) { 3166 { "hle", "off" }, 3167 { "rtm", "off" }, 3168 { /* end of list */ } 3169 }, 3170 }, 3171 { .version = 4, 3172 .note = "ARCH_CAPABILITIES, no TSX", 3173 .props = (PropValue[]) { 3174 { "vmx-eptp-switching", "on" }, 3175 { /* end of list */ } 3176 }, 3177 }, 3178 { /* end of list */ } 3179 } 3180 }, 3181 { 3182 .name = "Cooperlake", 3183 .level = 0xd, 3184 .vendor = CPUID_VENDOR_INTEL, 3185 .family = 6, 3186 .model = 85, 3187 .stepping = 10, 3188 .features[FEAT_1_EDX] = 3189 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3190 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3191 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3192 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3193 CPUID_DE | CPUID_FP87, 3194 .features[FEAT_1_ECX] = 3195 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3196 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3197 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3198 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3199 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3200 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3201 .features[FEAT_8000_0001_EDX] = 3202 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3203 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3204 .features[FEAT_8000_0001_ECX] = 3205 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3206 .features[FEAT_7_0_EBX] = 3207 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3208 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3209 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3210 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3211 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3212 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3213 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3214 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3215 .features[FEAT_7_0_ECX] = 3216 CPUID_7_0_ECX_PKU | 3217 CPUID_7_0_ECX_AVX512VNNI, 3218 .features[FEAT_7_0_EDX] = 3219 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3220 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3221 .features[FEAT_ARCH_CAPABILITIES] = 3222 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3223 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3224 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3225 .features[FEAT_7_1_EAX] = 3226 CPUID_7_1_EAX_AVX512_BF16, 3227 /* 3228 * Missing: XSAVES (not supported by some Linux versions, 3229 * including v4.1 to v4.12). 3230 * KVM doesn't yet expose any XSAVES state save component, 3231 * and the only one defined in Skylake (processor tracing) 3232 * probably will block migration anyway. 
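/*
 * Illustrative aside (not part of QEMU): the FEAT_XSAVE word used by these
 * models is CPUID.(EAX=0DH,ECX=1):EAX, where bit 0 is XSAVEOPT, bit 1 XSAVEC,
 * bit 2 XGETBV1 and bit 3 the XSAVES feature discussed in the comment above.
 * A standalone sketch (GCC/Clang <cpuid.h> assumed) that prints those four
 * bits for the host CPU:
 */
#if 0   /* illustrative example, not part of the build */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid_count(0xd, 1, &eax, &ebx, &ecx, &edx)) {
        return 1;
    }
    printf("xsaveopt=%u xsavec=%u xgetbv1=%u xsaves=%u\n",
           eax & 1, (eax >> 1) & 1, (eax >> 2) & 1, (eax >> 3) & 1);
    return 0;
}
#endif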
3233 */ 3234 .features[FEAT_XSAVE] = 3235 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3236 CPUID_XSAVE_XGETBV1, 3237 .features[FEAT_6_EAX] = 3238 CPUID_6_EAX_ARAT, 3239 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3240 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3241 MSR_VMX_BASIC_TRUE_CTLS, 3242 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3243 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3244 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3245 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3246 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3247 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3248 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3249 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3250 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3251 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3252 .features[FEAT_VMX_EXIT_CTLS] = 3253 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3254 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3255 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3256 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3257 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3258 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3259 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3260 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3261 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3262 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3263 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3264 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3265 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3266 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3267 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3268 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3269 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3270 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3271 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3272 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3273 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3274 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3275 .features[FEAT_VMX_SECONDARY_CTLS] = 3276 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3277 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3278 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3279 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3280 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3281 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3282 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3283 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3284 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3285 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3286 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3287 .xlevel = 0x80000008, 3288 .model_id = "Intel Xeon Processor (Cooperlake)", 3289 }, 3290 { 3291 .name = "Icelake-Client", 3292 .level = 0xd, 3293 .vendor = CPUID_VENDOR_INTEL, 3294 .family = 6, 3295 .model = 126, 3296 .stepping = 0, 3297 .features[FEAT_1_EDX] = 3298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
CPUID_CMOV | CPUID_MCA | 3300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3302 CPUID_DE | CPUID_FP87, 3303 .features[FEAT_1_ECX] = 3304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3305 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3306 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3307 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3308 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3309 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3310 .features[FEAT_8000_0001_EDX] = 3311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3312 CPUID_EXT2_SYSCALL, 3313 .features[FEAT_8000_0001_ECX] = 3314 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3315 .features[FEAT_8000_0008_EBX] = 3316 CPUID_8000_0008_EBX_WBNOINVD, 3317 .features[FEAT_7_0_EBX] = 3318 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3319 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3320 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3321 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3322 CPUID_7_0_EBX_SMAP, 3323 .features[FEAT_7_0_ECX] = 3324 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3325 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3326 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3327 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3328 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3329 .features[FEAT_7_0_EDX] = 3330 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3331 /* Missing: XSAVES (not supported by some Linux versions, 3332 * including v4.1 to v4.12). 3333 * KVM doesn't yet expose any XSAVES state save component, 3334 * and the only one defined in Skylake (processor tracing) 3335 * probably will block migration anyway. 
3336 */ 3337 .features[FEAT_XSAVE] = 3338 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3339 CPUID_XSAVE_XGETBV1, 3340 .features[FEAT_6_EAX] = 3341 CPUID_6_EAX_ARAT, 3342 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3343 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3344 MSR_VMX_BASIC_TRUE_CTLS, 3345 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3346 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3347 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3348 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3349 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3350 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3351 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3352 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3353 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3354 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3355 .features[FEAT_VMX_EXIT_CTLS] = 3356 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3357 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3358 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3359 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3360 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3361 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3362 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3363 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3364 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3365 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3366 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3367 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3368 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3369 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3370 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3371 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3372 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3373 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3374 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3375 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3376 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3377 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3378 .features[FEAT_VMX_SECONDARY_CTLS] = 3379 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3380 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3381 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3382 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3383 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3384 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3385 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3386 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3387 .xlevel = 0x80000008, 3388 .model_id = "Intel Core Processor (Icelake)", 3389 .versions = (X86CPUVersionDefinition[]) { 3390 { 3391 .version = 1, 3392 .note = "deprecated" 3393 }, 3394 { 3395 .version = 2, 3396 .note = "no TSX, deprecated", 3397 .alias = "Icelake-Client-noTSX", 3398 .props = (PropValue[]) { 3399 { "hle", "off" }, 3400 { "rtm", "off" }, 3401 { /* end of list */ } 3402 }, 3403 }, 3404 { /* end of list */ } 3405 }, 3406 .deprecation_note = "use Icelake-Server instead" 3407 }, 3408 { 3409 .name = "Icelake-Server", 3410 .level = 0xd, 
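/*
 * Illustrative aside (not part of QEMU): the .level and .xlevel fields used
 * throughout these definitions are the maximum basic and extended CPUID
 * leaves the model advertises, i.e. the values a guest reads back in
 * CPUID.0H:EAX and CPUID.80000000H:EAX.  A standalone sketch (GCC/Clang
 * <cpuid.h> assumed) printing the same two limits for the host CPU:
 */
#if 0   /* illustrative example, not part of the build */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    __get_cpuid(0, &eax, &ebx, &ecx, &edx);
    printf("max basic leaf:    0x%x\n", eax);
    __get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
    printf("max extended leaf: 0x%x\n", eax);
    return 0;
}
#endif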
3411 .vendor = CPUID_VENDOR_INTEL, 3412 .family = 6, 3413 .model = 134, 3414 .stepping = 0, 3415 .features[FEAT_1_EDX] = 3416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3420 CPUID_DE | CPUID_FP87, 3421 .features[FEAT_1_ECX] = 3422 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3423 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3425 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3426 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3427 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3428 .features[FEAT_8000_0001_EDX] = 3429 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3430 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3431 .features[FEAT_8000_0001_ECX] = 3432 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3433 .features[FEAT_8000_0008_EBX] = 3434 CPUID_8000_0008_EBX_WBNOINVD, 3435 .features[FEAT_7_0_EBX] = 3436 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3437 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3438 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3439 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3440 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3441 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3442 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3443 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3444 .features[FEAT_7_0_ECX] = 3445 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3446 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3447 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3448 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3449 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3450 .features[FEAT_7_0_EDX] = 3451 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3452 /* Missing: XSAVES (not supported by some Linux versions, 3453 * including v4.1 to v4.12). 3454 * KVM doesn't yet expose any XSAVES state save component, 3455 * and the only one defined in Skylake (processor tracing) 3456 * probably will block migration anyway. 
3457 */ 3458 .features[FEAT_XSAVE] = 3459 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3460 CPUID_XSAVE_XGETBV1, 3461 .features[FEAT_6_EAX] = 3462 CPUID_6_EAX_ARAT, 3463 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3464 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3465 MSR_VMX_BASIC_TRUE_CTLS, 3466 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3467 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3468 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3469 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3470 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3471 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3472 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3473 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3474 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3475 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3476 .features[FEAT_VMX_EXIT_CTLS] = 3477 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3478 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3479 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3480 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3481 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3482 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3483 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3484 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3485 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3486 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3487 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3488 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3489 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3490 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3491 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3492 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3493 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3494 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3495 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3496 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3497 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3498 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3499 .features[FEAT_VMX_SECONDARY_CTLS] = 3500 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3501 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3502 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3503 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3504 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3505 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3506 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3507 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3508 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3509 .xlevel = 0x80000008, 3510 .model_id = "Intel Xeon Processor (Icelake)", 3511 .versions = (X86CPUVersionDefinition[]) { 3512 { .version = 1 }, 3513 { 3514 .version = 2, 3515 .note = "no TSX", 3516 .alias = "Icelake-Server-noTSX", 3517 .props = (PropValue[]) { 3518 { "hle", "off" }, 3519 { "rtm", "off" }, 3520 { /* end of list */ } 3521 }, 3522 }, 3523 { 3524 .version = 3, 3525 .props = (PropValue[]) { 3526 { "arch-capabilities", "on" }, 3527 { "rdctl-no", "on" }, 3528 { "ibrs-all", "on" }, 3529 { 
"skip-l1dfl-vmentry", "on" }, 3530 { "mds-no", "on" }, 3531 { "pschange-mc-no", "on" }, 3532 { "taa-no", "on" }, 3533 { /* end of list */ } 3534 }, 3535 }, 3536 { 3537 .version = 4, 3538 .props = (PropValue[]) { 3539 { "sha-ni", "on" }, 3540 { "avx512ifma", "on" }, 3541 { "rdpid", "on" }, 3542 { "fsrm", "on" }, 3543 { "vmx-rdseed-exit", "on" }, 3544 { "vmx-pml", "on" }, 3545 { "vmx-eptp-switching", "on" }, 3546 { "model", "106" }, 3547 { /* end of list */ } 3548 }, 3549 }, 3550 { /* end of list */ } 3551 } 3552 }, 3553 { 3554 .name = "Denverton", 3555 .level = 21, 3556 .vendor = CPUID_VENDOR_INTEL, 3557 .family = 6, 3558 .model = 95, 3559 .stepping = 1, 3560 .features[FEAT_1_EDX] = 3561 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3562 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3563 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3564 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3565 CPUID_SSE | CPUID_SSE2, 3566 .features[FEAT_1_ECX] = 3567 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3568 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3569 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3570 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3571 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3572 .features[FEAT_8000_0001_EDX] = 3573 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3574 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3575 .features[FEAT_8000_0001_ECX] = 3576 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3577 .features[FEAT_7_0_EBX] = 3578 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3579 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3580 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3581 .features[FEAT_7_0_EDX] = 3582 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3583 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3584 /* 3585 * Missing: XSAVES (not supported by some Linux versions, 3586 * including v4.1 to v4.12). 3587 * KVM doesn't yet expose any XSAVES state save component, 3588 * and the only one defined in Skylake (processor tracing) 3589 * probably will block migration anyway. 
3590 */ 3591 .features[FEAT_XSAVE] = 3592 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3593 .features[FEAT_6_EAX] = 3594 CPUID_6_EAX_ARAT, 3595 .features[FEAT_ARCH_CAPABILITIES] = 3596 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3597 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3598 MSR_VMX_BASIC_TRUE_CTLS, 3599 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3600 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3601 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3602 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3603 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3604 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3605 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3606 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3607 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3608 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3609 .features[FEAT_VMX_EXIT_CTLS] = 3610 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3611 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3612 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3613 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3614 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3615 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3616 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3617 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3618 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3619 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3620 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3621 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3622 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3623 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3624 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3625 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3626 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3627 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3628 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3629 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3630 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3631 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3632 .features[FEAT_VMX_SECONDARY_CTLS] = 3633 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3634 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3635 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3636 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3637 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3638 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3639 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3640 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3641 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3642 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3643 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3644 .xlevel = 0x80000008, 3645 .model_id = "Intel Atom Processor (Denverton)", 3646 .versions = (X86CPUVersionDefinition[]) { 3647 { .version = 1 }, 3648 { 3649 .version = 2, 3650 .note = "no MPX, no MONITOR", 3651 .props = (PropValue[]) { 3652 { "monitor", "off" }, 3653 { "mpx", "off" }, 3654 { /* end of list */ }, 3655 }, 3656 }, 3657 { /* end of list */ }, 3658 }, 3659 
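/*
 * Illustrative aside (not part of QEMU): FEAT_ARCH_CAPABILITIES above models
 * the read-only IA32_ARCH_CAPABILITIES MSR.  A standalone sketch decoding a
 * raw MSR value into the property names used by these definitions, assuming
 * the documented bit layout (RDCL_NO=0, IBRS_ALL=1, SKIP_L1DFL_VMENTRY=3,
 * MDS_NO=5, PSCHANGE_MC_NO=6, TAA_NO=8):
 */
#if 0   /* illustrative example, not part of the build */
#include <stdint.h>
#include <stdio.h>

static void decode_arch_capabilities(uint64_t val)
{
    printf("rdctl-no:           %u\n", (unsigned)((val >> 0) & 1));
    printf("ibrs-all:           %u\n", (unsigned)((val >> 1) & 1));
    printf("skip-l1dfl-vmentry: %u\n", (unsigned)((val >> 3) & 1));
    printf("mds-no:             %u\n", (unsigned)((val >> 5) & 1));
    printf("pschange-mc-no:     %u\n", (unsigned)((val >> 6) & 1));
    printf("taa-no:             %u\n", (unsigned)((val >> 8) & 1));
}

int main(void)
{
    /* the two bits set for Denverton above: RDCL_NO | SKIP_L1DFL_VMENTRY */
    decode_arch_capabilities((1u << 0) | (1u << 3));
    return 0;
}
#endif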
}, 3660 { 3661 .name = "Snowridge", 3662 .level = 27, 3663 .vendor = CPUID_VENDOR_INTEL, 3664 .family = 6, 3665 .model = 134, 3666 .stepping = 1, 3667 .features[FEAT_1_EDX] = 3668 /* missing: CPUID_PN CPUID_IA64 */ 3669 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3670 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3671 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3672 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3673 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3674 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3675 CPUID_MMX | 3676 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3677 .features[FEAT_1_ECX] = 3678 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3679 CPUID_EXT_SSSE3 | 3680 CPUID_EXT_CX16 | 3681 CPUID_EXT_SSE41 | 3682 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3683 CPUID_EXT_POPCNT | 3684 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3685 CPUID_EXT_RDRAND, 3686 .features[FEAT_8000_0001_EDX] = 3687 CPUID_EXT2_SYSCALL | 3688 CPUID_EXT2_NX | 3689 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3690 CPUID_EXT2_LM, 3691 .features[FEAT_8000_0001_ECX] = 3692 CPUID_EXT3_LAHF_LM | 3693 CPUID_EXT3_3DNOWPREFETCH, 3694 .features[FEAT_7_0_EBX] = 3695 CPUID_7_0_EBX_FSGSBASE | 3696 CPUID_7_0_EBX_SMEP | 3697 CPUID_7_0_EBX_ERMS | 3698 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3699 CPUID_7_0_EBX_RDSEED | 3700 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3701 CPUID_7_0_EBX_CLWB | 3702 CPUID_7_0_EBX_SHA_NI, 3703 .features[FEAT_7_0_ECX] = 3704 CPUID_7_0_ECX_UMIP | 3705 /* missing bit 5 */ 3706 CPUID_7_0_ECX_GFNI | 3707 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3708 CPUID_7_0_ECX_MOVDIR64B, 3709 .features[FEAT_7_0_EDX] = 3710 CPUID_7_0_EDX_SPEC_CTRL | 3711 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3712 CPUID_7_0_EDX_CORE_CAPABILITY, 3713 .features[FEAT_CORE_CAPABILITY] = 3714 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3715 /* 3716 * Missing: XSAVES (not supported by some Linux versions, 3717 * including v4.1 to v4.12). 3718 * KVM doesn't yet expose any XSAVES state save component, 3719 * and the only one defined in Skylake (processor tracing) 3720 * probably will block migration anyway. 
3721 */ 3722 .features[FEAT_XSAVE] = 3723 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3724 CPUID_XSAVE_XGETBV1, 3725 .features[FEAT_6_EAX] = 3726 CPUID_6_EAX_ARAT, 3727 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3728 MSR_VMX_BASIC_TRUE_CTLS, 3729 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3730 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3731 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3732 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3733 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3734 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3735 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3736 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3737 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3738 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3739 .features[FEAT_VMX_EXIT_CTLS] = 3740 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3741 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3742 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3743 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3744 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3745 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3746 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3747 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3748 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3749 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3750 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3751 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3752 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3753 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3754 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3755 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3756 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3757 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3758 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3759 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3760 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3761 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3762 .features[FEAT_VMX_SECONDARY_CTLS] = 3763 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3764 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3765 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3766 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3767 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3768 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3769 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3770 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3771 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3772 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3773 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3774 .xlevel = 0x80000008, 3775 .model_id = "Intel Atom Processor (SnowRidge)", 3776 .versions = (X86CPUVersionDefinition[]) { 3777 { .version = 1 }, 3778 { 3779 .version = 2, 3780 .props = (PropValue[]) { 3781 { "mpx", "off" }, 3782 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3783 { /* end of list */ }, 3784 }, 3785 }, 3786 { /* end of list */ }, 3787 }, 3788 }, 3789 { 3790 .name = "KnightsMill", 3791 .level = 0xd, 3792 .vendor = CPUID_VENDOR_INTEL, 
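/*
 * Illustrative aside (not part of QEMU): FEAT_VMX_BASIC above models the
 * IA32_VMX_BASIC capability MSR.  The two flags used here live in its high
 * half: bit 54 reports INS/OUTS instruction information on VM exits and
 * bit 55 advertises the "true" pin/proc/exit/entry control MSRs (bit numbers
 * as documented for that MSR).  A standalone sketch pulling those fields,
 * plus the VMCS revision identifier, out of a raw 64-bit value:
 */
#if 0   /* illustrative example, not part of the build */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* a value with just the two capability bits used above */
    uint64_t vmx_basic = (1ULL << 54) | (1ULL << 55);

    printf("VMCS revision id: 0x%x\n", (uint32_t)(vmx_basic & 0x7fffffff));
    printf("ins/outs info:    %u\n", (unsigned)((vmx_basic >> 54) & 1));
    printf("true ctls MSRs:   %u\n", (unsigned)((vmx_basic >> 55) & 1));
    return 0;
}
#endif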
3793 .family = 6, 3794 .model = 133, 3795 .stepping = 0, 3796 .features[FEAT_1_EDX] = 3797 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3798 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3799 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3800 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3801 CPUID_PSE | CPUID_DE | CPUID_FP87, 3802 .features[FEAT_1_ECX] = 3803 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3804 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3805 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3806 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3807 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3808 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3809 .features[FEAT_8000_0001_EDX] = 3810 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3811 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3812 .features[FEAT_8000_0001_ECX] = 3813 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3814 .features[FEAT_7_0_EBX] = 3815 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3816 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3817 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3818 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3819 CPUID_7_0_EBX_AVX512ER, 3820 .features[FEAT_7_0_ECX] = 3821 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3822 .features[FEAT_7_0_EDX] = 3823 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3824 .features[FEAT_XSAVE] = 3825 CPUID_XSAVE_XSAVEOPT, 3826 .features[FEAT_6_EAX] = 3827 CPUID_6_EAX_ARAT, 3828 .xlevel = 0x80000008, 3829 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3830 }, 3831 { 3832 .name = "Opteron_G1", 3833 .level = 5, 3834 .vendor = CPUID_VENDOR_AMD, 3835 .family = 15, 3836 .model = 6, 3837 .stepping = 1, 3838 .features[FEAT_1_EDX] = 3839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3843 CPUID_DE | CPUID_FP87, 3844 .features[FEAT_1_ECX] = 3845 CPUID_EXT_SSE3, 3846 .features[FEAT_8000_0001_EDX] = 3847 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3848 .xlevel = 0x80000008, 3849 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3850 }, 3851 { 3852 .name = "Opteron_G2", 3853 .level = 5, 3854 .vendor = CPUID_VENDOR_AMD, 3855 .family = 15, 3856 .model = 6, 3857 .stepping = 1, 3858 .features[FEAT_1_EDX] = 3859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3863 CPUID_DE | CPUID_FP87, 3864 .features[FEAT_1_ECX] = 3865 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3866 .features[FEAT_8000_0001_EDX] = 3867 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3868 .features[FEAT_8000_0001_ECX] = 3869 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3870 .xlevel = 0x80000008, 3871 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3872 }, 3873 { 3874 .name = "Opteron_G3", 3875 .level = 5, 3876 .vendor = CPUID_VENDOR_AMD, 3877 .family = 16, 3878 .model = 2, 3879 .stepping = 3, 3880 .features[FEAT_1_EDX] = 3881 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3882 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3883 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3884 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3885 CPUID_DE | CPUID_FP87, 3886 .features[FEAT_1_ECX] = 3887 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3888 CPUID_EXT_SSE3, 3889 .features[FEAT_8000_0001_EDX] = 3890 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3891 CPUID_EXT2_RDTSCP, 3892 .features[FEAT_8000_0001_ECX] = 3893 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3894 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3895 .xlevel = 0x80000008, 3896 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3897 }, 3898 { 3899 .name = "Opteron_G4", 3900 .level = 0xd, 3901 .vendor = CPUID_VENDOR_AMD, 3902 .family = 21, 3903 .model = 1, 3904 .stepping = 2, 3905 .features[FEAT_1_EDX] = 3906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3910 CPUID_DE | CPUID_FP87, 3911 .features[FEAT_1_ECX] = 3912 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3913 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3914 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3915 CPUID_EXT_SSE3, 3916 .features[FEAT_8000_0001_EDX] = 3917 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3918 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3919 .features[FEAT_8000_0001_ECX] = 3920 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3921 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3922 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3923 CPUID_EXT3_LAHF_LM, 3924 .features[FEAT_SVM] = 3925 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3926 /* no xsaveopt! */ 3927 .xlevel = 0x8000001A, 3928 .model_id = "AMD Opteron 62xx class CPU", 3929 }, 3930 { 3931 .name = "Opteron_G5", 3932 .level = 0xd, 3933 .vendor = CPUID_VENDOR_AMD, 3934 .family = 21, 3935 .model = 2, 3936 .stepping = 0, 3937 .features[FEAT_1_EDX] = 3938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3942 CPUID_DE | CPUID_FP87, 3943 .features[FEAT_1_ECX] = 3944 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3945 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3946 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3947 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3948 .features[FEAT_8000_0001_EDX] = 3949 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3950 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3951 .features[FEAT_8000_0001_ECX] = 3952 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3953 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3954 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3955 CPUID_EXT3_LAHF_LM, 3956 .features[FEAT_SVM] = 3957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3958 /* no xsaveopt! 
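/*
 * Illustrative aside (not part of QEMU): the FEAT_SVM word set for the
 * Opteron and EPYC models is the EDX output of CPUID leaf 8000000AH, where
 * nested paging (NPT) is bit 0 and NRIP save is bit 3 (bit numbers per the
 * AMD documentation).  A standalone sketch (GCC/Clang <cpuid.h> assumed)
 * checking those two bits on an AMD host:
 */
#if 0   /* illustrative example, not part of the build */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 8000000AH not available (not an AMD CPU?)");
        return 1;
    }
    printf("npt=%u nrip-save=%u\n", edx & 1, (edx >> 3) & 1);
    return 0;
}
#endif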
*/ 3959 .xlevel = 0x8000001A, 3960 .model_id = "AMD Opteron 63xx class CPU", 3961 }, 3962 { 3963 .name = "EPYC", 3964 .level = 0xd, 3965 .vendor = CPUID_VENDOR_AMD, 3966 .family = 23, 3967 .model = 1, 3968 .stepping = 2, 3969 .features[FEAT_1_EDX] = 3970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3974 CPUID_VME | CPUID_FP87, 3975 .features[FEAT_1_ECX] = 3976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3977 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3980 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3981 .features[FEAT_8000_0001_EDX] = 3982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3984 CPUID_EXT2_SYSCALL, 3985 .features[FEAT_8000_0001_ECX] = 3986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3989 CPUID_EXT3_TOPOEXT, 3990 .features[FEAT_7_0_EBX] = 3991 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3992 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3993 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3994 CPUID_7_0_EBX_SHA_NI, 3995 .features[FEAT_XSAVE] = 3996 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3997 CPUID_XSAVE_XGETBV1, 3998 .features[FEAT_6_EAX] = 3999 CPUID_6_EAX_ARAT, 4000 .features[FEAT_SVM] = 4001 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4002 .xlevel = 0x8000001E, 4003 .model_id = "AMD EPYC Processor", 4004 .cache_info = &epyc_cache_info, 4005 .versions = (X86CPUVersionDefinition[]) { 4006 { .version = 1 }, 4007 { 4008 .version = 2, 4009 .alias = "EPYC-IBPB", 4010 .props = (PropValue[]) { 4011 { "ibpb", "on" }, 4012 { "model-id", 4013 "AMD EPYC Processor (with IBPB)" }, 4014 { /* end of list */ } 4015 } 4016 }, 4017 { 4018 .version = 3, 4019 .props = (PropValue[]) { 4020 { "ibpb", "on" }, 4021 { "perfctr-core", "on" }, 4022 { "clzero", "on" }, 4023 { "xsaveerptr", "on" }, 4024 { "xsaves", "on" }, 4025 { "model-id", 4026 "AMD EPYC Processor" }, 4027 { /* end of list */ } 4028 } 4029 }, 4030 { /* end of list */ } 4031 } 4032 }, 4033 { 4034 .name = "Dhyana", 4035 .level = 0xd, 4036 .vendor = CPUID_VENDOR_HYGON, 4037 .family = 24, 4038 .model = 0, 4039 .stepping = 1, 4040 .features[FEAT_1_EDX] = 4041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4045 CPUID_VME | CPUID_FP87, 4046 .features[FEAT_1_ECX] = 4047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4052 .features[FEAT_8000_0001_EDX] = 4053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4055 CPUID_EXT2_SYSCALL, 4056 .features[FEAT_8000_0001_ECX] = 4057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4058 CPUID_EXT3_MISALIGNSSE | 
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4060 CPUID_EXT3_TOPOEXT, 4061 .features[FEAT_8000_0008_EBX] = 4062 CPUID_8000_0008_EBX_IBPB, 4063 .features[FEAT_7_0_EBX] = 4064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4067 /* 4068 * Missing: XSAVES (not supported by some Linux versions, 4069 * including v4.1 to v4.12). 4070 * KVM doesn't yet expose any XSAVES state save component. 4071 */ 4072 .features[FEAT_XSAVE] = 4073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4074 CPUID_XSAVE_XGETBV1, 4075 .features[FEAT_6_EAX] = 4076 CPUID_6_EAX_ARAT, 4077 .features[FEAT_SVM] = 4078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4079 .xlevel = 0x8000001E, 4080 .model_id = "Hygon Dhyana Processor", 4081 .cache_info = &epyc_cache_info, 4082 }, 4083 { 4084 .name = "EPYC-Rome", 4085 .level = 0xd, 4086 .vendor = CPUID_VENDOR_AMD, 4087 .family = 23, 4088 .model = 49, 4089 .stepping = 0, 4090 .features[FEAT_1_EDX] = 4091 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4092 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4093 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4094 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4095 CPUID_VME | CPUID_FP87, 4096 .features[FEAT_1_ECX] = 4097 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4098 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4099 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4100 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4101 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4102 .features[FEAT_8000_0001_EDX] = 4103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4104 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4105 CPUID_EXT2_SYSCALL, 4106 .features[FEAT_8000_0001_ECX] = 4107 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4108 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4109 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4110 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4111 .features[FEAT_8000_0008_EBX] = 4112 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4113 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4114 CPUID_8000_0008_EBX_STIBP, 4115 .features[FEAT_7_0_EBX] = 4116 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4117 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4118 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4119 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4120 .features[FEAT_7_0_ECX] = 4121 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4122 .features[FEAT_XSAVE] = 4123 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4124 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4125 .features[FEAT_6_EAX] = 4126 CPUID_6_EAX_ARAT, 4127 .features[FEAT_SVM] = 4128 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4129 .xlevel = 0x8000001E, 4130 .model_id = "AMD EPYC-Rome Processor", 4131 .cache_info = &epyc_rome_cache_info, 4132 }, 4133 }; 4134 4135 /* KVM-specific features that are automatically added/removed 4136 * from all CPU models when KVM is enabled. 
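/*
 * Illustrative aside (not part of QEMU): the kvm_default_props table that
 * follows is a NULL-terminated list of {property, value} pairs applied on top
 * of every CPU model when KVM is in use, and x86_cpu_change_kvm_default()
 * below lets callers rewrite one entry.  A standalone, simplified sketch of
 * that lookup-and-replace pattern (all names here are local to the sketch):
 */
#if 0   /* illustrative example, not part of the build */
#include <assert.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *prop;
    const char *value;
} PropValueExample;              /* stand-in for QEMU's PropValue */

static PropValueExample defaults[] = {
    { "x2apic", "on" },
    { "svm",    "off" },
    { NULL, NULL },
};

/* Replace the value of an entry that must already exist in the table */
static void change_default(const char *prop, const char *value)
{
    PropValueExample *pv;

    for (pv = defaults; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            return;
        }
    }
    assert(0);                   /* unknown property: caller bug */
}

int main(void)
{
    change_default("svm", "on");
    printf("svm -> %s\n", defaults[1].value);
    return 0;
}
#endif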
4137 */ 4138 static PropValue kvm_default_props[] = { 4139 { "kvmclock", "on" }, 4140 { "kvm-nopiodelay", "on" }, 4141 { "kvm-asyncpf", "on" }, 4142 { "kvm-steal-time", "on" }, 4143 { "kvm-pv-eoi", "on" }, 4144 { "kvmclock-stable-bit", "on" }, 4145 { "x2apic", "on" }, 4146 { "kvm-msi-ext-dest-id", "off" }, 4147 { "acpi", "off" }, 4148 { "monitor", "off" }, 4149 { "svm", "off" }, 4150 { NULL, NULL }, 4151 }; 4152 4153 /* TCG-specific defaults that override all CPU models when using TCG 4154 */ 4155 static PropValue tcg_default_props[] = { 4156 { "vme", "off" }, 4157 { NULL, NULL }, 4158 }; 4159 4160 4161 /* 4162 * We resolve CPU model aliases using -v1 when using "-machine 4163 * none", but this is just for compatibility while libvirt isn't 4164 * adapted to resolve CPU model versions before creating VMs. 4165 * See "Runnability guarantee of CPU models" at 4166 * docs/system/deprecated.rst. 4167 */ 4168 X86CPUVersion default_cpu_version = 1; 4169 4170 void x86_cpu_set_default_version(X86CPUVersion version) 4171 { 4172 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4173 assert(version != CPU_VERSION_AUTO); 4174 default_cpu_version = version; 4175 } 4176 4177 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4178 { 4179 int v = 0; 4180 const X86CPUVersionDefinition *vdef = 4181 x86_cpu_def_get_versions(model->cpudef); 4182 while (vdef->version) { 4183 v = vdef->version; 4184 vdef++; 4185 } 4186 return v; 4187 } 4188 4189 /* Return the actual version being used for a specific CPU model */ 4190 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4191 { 4192 X86CPUVersion v = model->version; 4193 if (v == CPU_VERSION_AUTO) { 4194 v = default_cpu_version; 4195 } 4196 if (v == CPU_VERSION_LATEST) { 4197 return x86_cpu_model_last_version(model); 4198 } 4199 return v; 4200 } 4201 4202 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4203 { 4204 PropValue *pv; 4205 for (pv = kvm_default_props; pv->prop; pv++) { 4206 if (!strcmp(pv->prop, prop)) { 4207 pv->value = value; 4208 break; 4209 } 4210 } 4211 4212 /* It is valid to call this function only for properties that 4213 * are already present in the kvm_default_props table. 4214 */ 4215 assert(pv->prop); 4216 } 4217 4218 static bool lmce_supported(void) 4219 { 4220 uint64_t mce_cap = 0; 4221 4222 #ifdef CONFIG_KVM 4223 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4224 return false; 4225 } 4226 #endif 4227 4228 return !!(mce_cap & MCG_LMCE_P); 4229 } 4230 4231 #define CPUID_MODEL_ID_SZ 48 4232 4233 /** 4234 * cpu_x86_fill_model_id: 4235 * Get CPUID model ID string from host CPU. 4236 * 4237 * @str should have at least CPUID_MODEL_ID_SZ bytes 4238 * 4239 * The function does NOT add a null terminator to the string 4240 * automatically. 
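/*
 * Illustrative aside (not part of QEMU): x86_cpu_model_resolve_version()
 * above turns the two symbolic versions into a concrete one -- "auto" follows
 * default_cpu_version, and "latest" walks the model's version list (which is
 * terminated by a zero entry) to its last element.  A standalone sketch of
 * the same resolution logic, with local stand-ins for the sentinels:
 */
#if 0   /* illustrative example, not part of the build */
#include <stdio.h>

enum { VER_AUTO = -1, VER_LATEST = -2 };   /* stand-ins for CPU_VERSION_* */

static int resolve_version(int requested, int default_version,
                           const int *versions /* zero-terminated */)
{
    int last = 0;

    if (requested == VER_AUTO) {
        requested = default_version;
    }
    if (requested == VER_LATEST) {
        while (*versions) {
            last = *versions++;
        }
        return last;
    }
    return requested;
}

int main(void)
{
    const int icelake_server_versions[] = { 1, 2, 3, 4, 0 };

    /* prints 4, then 1 */
    printf("%d\n", resolve_version(VER_LATEST, 1, icelake_server_versions));
    printf("%d\n", resolve_version(VER_AUTO, 1, icelake_server_versions));
    return 0;
}
#endif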
4241 */ 4242 static int cpu_x86_fill_model_id(char *str) 4243 { 4244 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4245 int i; 4246 4247 for (i = 0; i < 3; i++) { 4248 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4249 memcpy(str + i * 16 + 0, &eax, 4); 4250 memcpy(str + i * 16 + 4, &ebx, 4); 4251 memcpy(str + i * 16 + 8, &ecx, 4); 4252 memcpy(str + i * 16 + 12, &edx, 4); 4253 } 4254 return 0; 4255 } 4256 4257 static Property max_x86_cpu_properties[] = { 4258 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4259 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4260 DEFINE_PROP_END_OF_LIST() 4261 }; 4262 4263 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4264 { 4265 DeviceClass *dc = DEVICE_CLASS(oc); 4266 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4267 4268 xcc->ordering = 9; 4269 4270 xcc->model_description = 4271 "Enables all features supported by the accelerator in the current host"; 4272 4273 device_class_set_props(dc, max_x86_cpu_properties); 4274 } 4275 4276 static void max_x86_cpu_initfn(Object *obj) 4277 { 4278 X86CPU *cpu = X86_CPU(obj); 4279 CPUX86State *env = &cpu->env; 4280 KVMState *s = kvm_state; 4281 4282 /* We can't fill the features array here because we don't know yet if 4283 * "migratable" is true or false. 4284 */ 4285 cpu->max_features = true; 4286 4287 if (accel_uses_host_cpuid()) { 4288 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4289 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4290 int family, model, stepping; 4291 4292 host_vendor_fms(vendor, &family, &model, &stepping); 4293 cpu_x86_fill_model_id(model_id); 4294 4295 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 4296 object_property_set_int(OBJECT(cpu), "family", family, &error_abort); 4297 object_property_set_int(OBJECT(cpu), "model", model, &error_abort); 4298 object_property_set_int(OBJECT(cpu), "stepping", stepping, 4299 &error_abort); 4300 object_property_set_str(OBJECT(cpu), "model-id", model_id, 4301 &error_abort); 4302 4303 if (kvm_enabled()) { 4304 env->cpuid_min_level = 4305 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4306 env->cpuid_min_xlevel = 4307 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4308 env->cpuid_min_xlevel2 = 4309 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4310 } else { 4311 env->cpuid_min_level = 4312 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4313 env->cpuid_min_xlevel = 4314 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4315 env->cpuid_min_xlevel2 = 4316 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4317 } 4318 4319 if (lmce_supported()) { 4320 object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); 4321 } 4322 } else { 4323 object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, 4324 &error_abort); 4325 object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); 4326 object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); 4327 object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); 4328 object_property_set_str(OBJECT(cpu), "model-id", 4329 "QEMU TCG CPU version " QEMU_HW_VERSION, 4330 &error_abort); 4331 } 4332 4333 object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); 4334 } 4335 4336 static const TypeInfo max_x86_cpu_type_info = { 4337 .name = X86_CPU_TYPE_NAME("max"), 4338 .parent = TYPE_X86_CPU, 4339 .instance_init = max_x86_cpu_initfn, 4340 .class_init = max_x86_cpu_class_init, 4341 }; 4342 4343 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4344 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4345 { 
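/*
 * Illustrative aside (not part of QEMU): cpu_x86_fill_model_id() above copies
 * the 48-byte brand string returned by CPUID leaves 80000002H-80000004H and,
 * as its comment notes, leaves NUL termination to the caller.  A standalone
 * equivalent (GCC/Clang <cpuid.h> assumed) that adds the terminator itself:
 */
#if 0   /* illustrative example, not part of the build */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned int regs[4];
    char brand[48 + 1];
    int i;

    for (i = 0; i < 3; i++) {
        __get_cpuid(0x80000002 + i, &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(brand + i * 16, regs, 16);
    }
    brand[48] = '\0';
    printf("%s\n", brand);
    return 0;
}
#endif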
4346 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4347 4348 xcc->host_cpuid_required = true; 4349 xcc->ordering = 8; 4350 4351 #if defined(CONFIG_KVM) 4352 xcc->model_description = 4353 "KVM processor with all supported host features "; 4354 #elif defined(CONFIG_HVF) 4355 xcc->model_description = 4356 "HVF processor with all supported host features "; 4357 #endif 4358 } 4359 4360 static const TypeInfo host_x86_cpu_type_info = { 4361 .name = X86_CPU_TYPE_NAME("host"), 4362 .parent = X86_CPU_TYPE_NAME("max"), 4363 .class_init = host_x86_cpu_class_init, 4364 }; 4365 4366 #endif 4367 4368 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4369 { 4370 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4371 4372 switch (f->type) { 4373 case CPUID_FEATURE_WORD: 4374 { 4375 const char *reg = get_register_name_32(f->cpuid.reg); 4376 assert(reg); 4377 return g_strdup_printf("CPUID.%02XH:%s", 4378 f->cpuid.eax, reg); 4379 } 4380 case MSR_FEATURE_WORD: 4381 return g_strdup_printf("MSR(%02XH)", 4382 f->msr.index); 4383 } 4384 4385 return NULL; 4386 } 4387 4388 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4389 { 4390 FeatureWord w; 4391 4392 for (w = 0; w < FEATURE_WORDS; w++) { 4393 if (cpu->filtered_features[w]) { 4394 return true; 4395 } 4396 } 4397 4398 return false; 4399 } 4400 4401 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4402 const char *verbose_prefix) 4403 { 4404 CPUX86State *env = &cpu->env; 4405 FeatureWordInfo *f = &feature_word_info[w]; 4406 int i; 4407 4408 if (!cpu->force_features) { 4409 env->features[w] &= ~mask; 4410 } 4411 cpu->filtered_features[w] |= mask; 4412 4413 if (!verbose_prefix) { 4414 return; 4415 } 4416 4417 for (i = 0; i < 64; ++i) { 4418 if ((1ULL << i) & mask) { 4419 g_autofree char *feat_word_str = feature_word_description(f, i); 4420 warn_report("%s: %s%s%s [bit %d]", 4421 verbose_prefix, 4422 feat_word_str, 4423 f->feat_names[i] ? "." : "", 4424 f->feat_names[i] ? f->feat_names[i] : "", i); 4425 } 4426 } 4427 } 4428 4429 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4430 const char *name, void *opaque, 4431 Error **errp) 4432 { 4433 X86CPU *cpu = X86_CPU(obj); 4434 CPUX86State *env = &cpu->env; 4435 int64_t value; 4436 4437 value = (env->cpuid_version >> 8) & 0xf; 4438 if (value == 0xf) { 4439 value += (env->cpuid_version >> 20) & 0xff; 4440 } 4441 visit_type_int(v, name, &value, errp); 4442 } 4443 4444 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4445 const char *name, void *opaque, 4446 Error **errp) 4447 { 4448 X86CPU *cpu = X86_CPU(obj); 4449 CPUX86State *env = &cpu->env; 4450 const int64_t min = 0; 4451 const int64_t max = 0xff + 0xf; 4452 int64_t value; 4453 4454 if (!visit_type_int(v, name, &value, errp)) { 4455 return; 4456 } 4457 if (value < min || value > max) { 4458 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4459 name ? 
name : "null", value, min, max); 4460 return; 4461 } 4462 4463 env->cpuid_version &= ~0xff00f00; 4464 if (value > 0x0f) { 4465 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4466 } else { 4467 env->cpuid_version |= value << 8; 4468 } 4469 } 4470 4471 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4472 const char *name, void *opaque, 4473 Error **errp) 4474 { 4475 X86CPU *cpu = X86_CPU(obj); 4476 CPUX86State *env = &cpu->env; 4477 int64_t value; 4478 4479 value = (env->cpuid_version >> 4) & 0xf; 4480 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4481 visit_type_int(v, name, &value, errp); 4482 } 4483 4484 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4485 const char *name, void *opaque, 4486 Error **errp) 4487 { 4488 X86CPU *cpu = X86_CPU(obj); 4489 CPUX86State *env = &cpu->env; 4490 const int64_t min = 0; 4491 const int64_t max = 0xff; 4492 int64_t value; 4493 4494 if (!visit_type_int(v, name, &value, errp)) { 4495 return; 4496 } 4497 if (value < min || value > max) { 4498 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4499 name ? name : "null", value, min, max); 4500 return; 4501 } 4502 4503 env->cpuid_version &= ~0xf00f0; 4504 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4505 } 4506 4507 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4508 const char *name, void *opaque, 4509 Error **errp) 4510 { 4511 X86CPU *cpu = X86_CPU(obj); 4512 CPUX86State *env = &cpu->env; 4513 int64_t value; 4514 4515 value = env->cpuid_version & 0xf; 4516 visit_type_int(v, name, &value, errp); 4517 } 4518 4519 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4520 const char *name, void *opaque, 4521 Error **errp) 4522 { 4523 X86CPU *cpu = X86_CPU(obj); 4524 CPUX86State *env = &cpu->env; 4525 const int64_t min = 0; 4526 const int64_t max = 0xf; 4527 int64_t value; 4528 4529 if (!visit_type_int(v, name, &value, errp)) { 4530 return; 4531 } 4532 if (value < min || value > max) { 4533 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4534 name ? 
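/*
 * Illustrative aside (not part of QEMU): the accessors above pack family,
 * model and stepping into the CPUID leaf 1 EAX layout -- stepping in bits
 * 3:0, model in 7:4, family in 11:8 -- with a family above 0xf spilling into
 * the extended family field (bits 27:20) and the upper four model bits going
 * into the extended model field (bits 19:16).  A standalone sketch of that
 * encoding (helper name local to the sketch); for the EPYC definition earlier
 * in this file (family 23, model 1, stepping 2) it prints 0x800f12:
 */
#if 0   /* illustrative example, not part of the build */
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_cpuid_version(int family, int model, int stepping)
{
    uint32_t eax = stepping & 0xf;

    if (family > 0x0f) {
        eax |= 0xf << 8;
        eax |= (uint32_t)(family - 0x0f) << 20;   /* extended family */
    } else {
        eax |= (uint32_t)family << 8;
    }
    eax |= (uint32_t)(model & 0xf) << 4;
    eax |= (uint32_t)(model >> 4) << 16;          /* extended model */
    return eax;
}

int main(void)
{
    printf("0x%x\n", encode_cpuid_version(23, 1, 2));   /* EPYC:        0x800f12 */
    printf("0x%x\n", encode_cpuid_version(6, 85, 6));   /* Cascadelake: 0x50656  */
    return 0;
}
#endif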
name : "null", value, min, max); 4535 return; 4536 } 4537 4538 env->cpuid_version &= ~0xf; 4539 env->cpuid_version |= value & 0xf; 4540 } 4541 4542 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4543 { 4544 X86CPU *cpu = X86_CPU(obj); 4545 CPUX86State *env = &cpu->env; 4546 char *value; 4547 4548 value = g_malloc(CPUID_VENDOR_SZ + 1); 4549 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4550 env->cpuid_vendor3); 4551 return value; 4552 } 4553 4554 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4555 Error **errp) 4556 { 4557 X86CPU *cpu = X86_CPU(obj); 4558 CPUX86State *env = &cpu->env; 4559 int i; 4560 4561 if (strlen(value) != CPUID_VENDOR_SZ) { 4562 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4563 return; 4564 } 4565 4566 env->cpuid_vendor1 = 0; 4567 env->cpuid_vendor2 = 0; 4568 env->cpuid_vendor3 = 0; 4569 for (i = 0; i < 4; i++) { 4570 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4571 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4572 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4573 } 4574 } 4575 4576 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4577 { 4578 X86CPU *cpu = X86_CPU(obj); 4579 CPUX86State *env = &cpu->env; 4580 char *value; 4581 int i; 4582 4583 value = g_malloc(48 + 1); 4584 for (i = 0; i < 48; i++) { 4585 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4586 } 4587 value[48] = '\0'; 4588 return value; 4589 } 4590 4591 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4592 Error **errp) 4593 { 4594 X86CPU *cpu = X86_CPU(obj); 4595 CPUX86State *env = &cpu->env; 4596 int c, len, i; 4597 4598 if (model_id == NULL) { 4599 model_id = ""; 4600 } 4601 len = strlen(model_id); 4602 memset(env->cpuid_model, 0, 48); 4603 for (i = 0; i < 48; i++) { 4604 if (i >= len) { 4605 c = '\0'; 4606 } else { 4607 c = (uint8_t)model_id[i]; 4608 } 4609 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4610 } 4611 } 4612 4613 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4614 void *opaque, Error **errp) 4615 { 4616 X86CPU *cpu = X86_CPU(obj); 4617 int64_t value; 4618 4619 value = cpu->env.tsc_khz * 1000; 4620 visit_type_int(v, name, &value, errp); 4621 } 4622 4623 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4624 void *opaque, Error **errp) 4625 { 4626 X86CPU *cpu = X86_CPU(obj); 4627 const int64_t min = 0; 4628 const int64_t max = INT64_MAX; 4629 int64_t value; 4630 4631 if (!visit_type_int(v, name, &value, errp)) { 4632 return; 4633 } 4634 if (value < min || value > max) { 4635 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4636 name ? name : "null", value, min, max); 4637 return; 4638 } 4639 4640 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4641 } 4642 4643 /* Generic getter for "feature-words" and "filtered-features" properties */ 4644 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4645 const char *name, void *opaque, 4646 Error **errp) 4647 { 4648 uint64_t *array = (uint64_t *)opaque; 4649 FeatureWord w; 4650 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4651 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4652 X86CPUFeatureWordInfoList *list = NULL; 4653 4654 for (w = 0; w < FEATURE_WORDS; w++) { 4655 FeatureWordInfo *wi = &feature_word_info[w]; 4656 /* 4657 * We didn't have MSR features when "feature-words" was 4658 * introduced. Therefore skipped other type entries. 
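/*
 * Illustrative aside (not part of QEMU): x86_cpuid_set_vendor() above packs
 * the 12-character vendor string into three little-endian words, which a
 * guest reads back in CPUID.0H as EBX, EDX and ECX (the hardware register
 * order for the vendor string).  A standalone sketch of the same packing;
 * for "AuthenticAMD" it prints the familiar 0x68747541 0x69746e65 0x444d4163:
 */
#if 0   /* illustrative example, not part of the build */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const char *vendor = "AuthenticAMD";   /* must be exactly 12 characters */
    uint32_t words[3] = { 0, 0, 0 };
    int i;

    for (i = 0; i < 4; i++) {
        words[0] |= (uint32_t)(uint8_t)vendor[i]     << (8 * i);   /* EBX */
        words[1] |= (uint32_t)(uint8_t)vendor[i + 4] << (8 * i);   /* EDX */
        words[2] |= (uint32_t)(uint8_t)vendor[i + 8] << (8 * i);   /* ECX */
    }
    printf("0x%08x 0x%08x 0x%08x\n", words[0], words[1], words[2]);
    return 0;
}
#endif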
4659 */ 4660 if (wi->type != CPUID_FEATURE_WORD) { 4661 continue; 4662 } 4663 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4664 qwi->cpuid_input_eax = wi->cpuid.eax; 4665 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4666 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4667 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4668 qwi->features = array[w]; 4669 4670 /* List will be in reverse order, but order shouldn't matter */ 4671 list_entries[w].next = list; 4672 list_entries[w].value = &word_infos[w]; 4673 list = &list_entries[w]; 4674 } 4675 4676 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4677 } 4678 4679 /* Convert all '_' in a feature string option name to '-', to make feature 4680 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4681 */ 4682 static inline void feat2prop(char *s) 4683 { 4684 while ((s = strchr(s, '_'))) { 4685 *s = '-'; 4686 } 4687 } 4688 4689 /* Return the feature property name for a feature flag bit */ 4690 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4691 { 4692 const char *name; 4693 /* XSAVE components are automatically enabled by other features, 4694 * so return the original feature name instead 4695 */ 4696 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4697 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4698 4699 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4700 x86_ext_save_areas[comp].bits) { 4701 w = x86_ext_save_areas[comp].feature; 4702 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4703 } 4704 } 4705 4706 assert(bitnr < 64); 4707 assert(w < FEATURE_WORDS); 4708 name = feature_word_info[w].feat_names[bitnr]; 4709 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4710 return name; 4711 } 4712 4713 /* Compatibility hack to maintain the legacy +-feat semantics, 4714 * where +-feat overwrites any feature set by 4715 * feat=on|off even if the latter is parsed after +-feat 4716 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4717 */ 4718 static GList *plus_features, *minus_features; 4719 4720 static gint compare_string(gconstpointer a, gconstpointer b) 4721 { 4722 return g_strcmp0(a, b); 4723 } 4724 4725 /* Parse "+feature,-feature,feature=foo" CPU feature string 4726 */ 4727 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4728 Error **errp) 4729 { 4730 char *featurestr; /* Single "key=value" string being parsed */ 4731 static bool cpu_globals_initialized; 4732 bool ambiguous = false; 4733 4734 if (cpu_globals_initialized) { 4735 return; 4736 } 4737 cpu_globals_initialized = true; 4738 4739 if (!features) { 4740 return; 4741 } 4742 4743 for (featurestr = strtok(features, ","); 4744 featurestr; 4745 featurestr = strtok(NULL, ",")) { 4746 const char *name; 4747 const char *val = NULL; 4748 char *eq = NULL; 4749 char num[32]; 4750 GlobalProperty *prop; 4751 4752 /* Compatibility syntax: */ 4753 if (featurestr[0] == '+') { 4754 plus_features = g_list_append(plus_features, 4755 g_strdup(featurestr + 1)); 4756 continue; 4757 } else if (featurestr[0] == '-') { 4758 minus_features = g_list_append(minus_features, 4759 g_strdup(featurestr + 1)); 4760 continue; 4761 } 4762 4763 eq = strchr(featurestr, '='); 4764 if (eq) { 4765 *eq++ = 0; 4766 val = eq; 4767 } else { 4768 val = "on"; 4769 } 4770 4771 feat2prop(featurestr); 4772 name = featurestr; 4773 4774 if (g_list_find_custom(plus_features, name, compare_string)) { 4775 warn_report("Ambiguous CPU model string. 
" 4776 "Don't mix both \"+%s\" and \"%s=%s\"", 4777 name, name, val); 4778 ambiguous = true; 4779 } 4780 if (g_list_find_custom(minus_features, name, compare_string)) { 4781 warn_report("Ambiguous CPU model string. " 4782 "Don't mix both \"-%s\" and \"%s=%s\"", 4783 name, name, val); 4784 ambiguous = true; 4785 } 4786 4787 /* Special case: */ 4788 if (!strcmp(name, "tsc-freq")) { 4789 int ret; 4790 uint64_t tsc_freq; 4791 4792 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4793 if (ret < 0 || tsc_freq > INT64_MAX) { 4794 error_setg(errp, "bad numerical value %s", val); 4795 return; 4796 } 4797 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4798 val = num; 4799 name = "tsc-frequency"; 4800 } 4801 4802 prop = g_new0(typeof(*prop), 1); 4803 prop->driver = typename; 4804 prop->property = g_strdup(name); 4805 prop->value = g_strdup(val); 4806 qdev_prop_register_global(prop); 4807 } 4808 4809 if (ambiguous) { 4810 warn_report("Compatibility of ambiguous CPU model " 4811 "strings won't be kept on future QEMU versions"); 4812 } 4813 } 4814 4815 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4816 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4817 4818 /* Build a list with the name of all features on a feature word array */ 4819 static void x86_cpu_list_feature_names(FeatureWordArray features, 4820 strList **feat_names) 4821 { 4822 FeatureWord w; 4823 strList **next = feat_names; 4824 4825 for (w = 0; w < FEATURE_WORDS; w++) { 4826 uint64_t filtered = features[w]; 4827 int i; 4828 for (i = 0; i < 64; i++) { 4829 if (filtered & (1ULL << i)) { 4830 strList *new = g_new0(strList, 1); 4831 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4832 *next = new; 4833 next = &new->next; 4834 } 4835 } 4836 } 4837 } 4838 4839 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4840 const char *name, void *opaque, 4841 Error **errp) 4842 { 4843 X86CPU *xc = X86_CPU(obj); 4844 strList *result = NULL; 4845 4846 x86_cpu_list_feature_names(xc->filtered_features, &result); 4847 visit_type_strList(v, "unavailable-features", &result, errp); 4848 } 4849 4850 /* Check for missing features that may prevent the CPU class from 4851 * running using the current machine and accelerator. 4852 */ 4853 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4854 strList **missing_feats) 4855 { 4856 X86CPU *xc; 4857 Error *err = NULL; 4858 strList **next = missing_feats; 4859 4860 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4861 strList *new = g_new0(strList, 1); 4862 new->value = g_strdup("kvm"); 4863 *missing_feats = new; 4864 return; 4865 } 4866 4867 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4868 4869 x86_cpu_expand_features(xc, &err); 4870 if (err) { 4871 /* Errors at x86_cpu_expand_features should never happen, 4872 * but in case it does, just report the model as not 4873 * runnable at all using the "type" property. 
4874 */ 4875 strList *new = g_new0(strList, 1); 4876 new->value = g_strdup("type"); 4877 *next = new; 4878 next = &new->next; 4879 error_free(err); 4880 } 4881 4882 x86_cpu_filter_features(xc, false); 4883 4884 x86_cpu_list_feature_names(xc->filtered_features, next); 4885 4886 object_unref(OBJECT(xc)); 4887 } 4888 4889 /* Print all cpuid feature names in featureset 4890 */ 4891 static void listflags(GList *features) 4892 { 4893 size_t len = 0; 4894 GList *tmp; 4895 4896 for (tmp = features; tmp; tmp = tmp->next) { 4897 const char *name = tmp->data; 4898 if ((len + strlen(name) + 1) >= 75) { 4899 qemu_printf("\n"); 4900 len = 0; 4901 } 4902 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4903 len += strlen(name) + 1; 4904 } 4905 qemu_printf("\n"); 4906 } 4907 4908 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4909 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4910 { 4911 ObjectClass *class_a = (ObjectClass *)a; 4912 ObjectClass *class_b = (ObjectClass *)b; 4913 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4914 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4915 int ret; 4916 4917 if (cc_a->ordering != cc_b->ordering) { 4918 ret = cc_a->ordering - cc_b->ordering; 4919 } else { 4920 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4921 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4922 ret = strcmp(name_a, name_b); 4923 } 4924 return ret; 4925 } 4926 4927 static GSList *get_sorted_cpu_model_list(void) 4928 { 4929 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4930 list = g_slist_sort(list, x86_cpu_list_compare); 4931 return list; 4932 } 4933 4934 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4935 { 4936 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4937 char *r = object_property_get_str(obj, "model-id", &error_abort); 4938 object_unref(obj); 4939 return r; 4940 } 4941 4942 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4943 { 4944 X86CPUVersion version; 4945 4946 if (!cc->model || !cc->model->is_alias) { 4947 return NULL; 4948 } 4949 version = x86_cpu_model_resolve_version(cc->model); 4950 if (version <= 0) { 4951 return NULL; 4952 } 4953 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4954 } 4955 4956 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4957 { 4958 ObjectClass *oc = data; 4959 X86CPUClass *cc = X86_CPU_CLASS(oc); 4960 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4961 g_autofree char *desc = g_strdup(cc->model_description); 4962 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4963 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4964 4965 if (!desc && alias_of) { 4966 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4967 desc = g_strdup("(alias configured by machine type)"); 4968 } else { 4969 desc = g_strdup_printf("(alias of %s)", alias_of); 4970 } 4971 } 4972 if (!desc && cc->model && cc->model->note) { 4973 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4974 } 4975 if (!desc) { 4976 desc = g_strdup_printf("%s", model_id); 4977 } 4978 4979 qemu_printf("x86 %-20s %-58s\n", name, desc); 4980 } 4981 4982 /* list available CPU models and flags */ 4983 void x86_cpu_list(void) 4984 { 4985 int i, j; 4986 GSList *list; 4987 GList *names = NULL; 4988 4989 qemu_printf("Available CPUs:\n"); 4990 list = get_sorted_cpu_model_list(); 4991 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4992 g_slist_free(list); 4993 4994 names = NULL; 4995 for (i = 0; i < 
ARRAY_SIZE(feature_word_info); i++) { 4996 FeatureWordInfo *fw = &feature_word_info[i]; 4997 for (j = 0; j < 64; j++) { 4998 if (fw->feat_names[j]) { 4999 names = g_list_append(names, (gpointer)fw->feat_names[j]); 5000 } 5001 } 5002 } 5003 5004 names = g_list_sort(names, (GCompareFunc)strcmp); 5005 5006 qemu_printf("\nRecognized CPUID flags:\n"); 5007 listflags(names); 5008 qemu_printf("\n"); 5009 g_list_free(names); 5010 } 5011 5012 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 5013 { 5014 ObjectClass *oc = data; 5015 X86CPUClass *cc = X86_CPU_CLASS(oc); 5016 CpuDefinitionInfoList **cpu_list = user_data; 5017 CpuDefinitionInfoList *entry; 5018 CpuDefinitionInfo *info; 5019 5020 info = g_malloc0(sizeof(*info)); 5021 info->name = x86_cpu_class_get_model_name(cc); 5022 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5023 info->has_unavailable_features = true; 5024 info->q_typename = g_strdup(object_class_get_name(oc)); 5025 info->migration_safe = cc->migration_safe; 5026 info->has_migration_safe = true; 5027 info->q_static = cc->static_model; 5028 if (cc->model && cc->model->cpudef->deprecation_note) { 5029 info->deprecated = true; 5030 } else { 5031 info->deprecated = false; 5032 } 5033 /* 5034 * Old machine types won't report aliases, so that alias translation 5035 * doesn't break compatibility with previous QEMU versions. 5036 */ 5037 if (default_cpu_version != CPU_VERSION_LEGACY) { 5038 info->alias_of = x86_cpu_class_get_alias_of(cc); 5039 info->has_alias_of = !!info->alias_of; 5040 } 5041 5042 entry = g_malloc0(sizeof(*entry)); 5043 entry->value = info; 5044 entry->next = *cpu_list; 5045 *cpu_list = entry; 5046 } 5047 5048 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5049 { 5050 CpuDefinitionInfoList *cpu_list = NULL; 5051 GSList *list = get_sorted_cpu_model_list(); 5052 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5053 g_slist_free(list); 5054 return cpu_list; 5055 } 5056 5057 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5058 bool migratable_only) 5059 { 5060 FeatureWordInfo *wi = &feature_word_info[w]; 5061 uint64_t r = 0; 5062 5063 if (kvm_enabled()) { 5064 switch (wi->type) { 5065 case CPUID_FEATURE_WORD: 5066 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5067 wi->cpuid.ecx, 5068 wi->cpuid.reg); 5069 break; 5070 case MSR_FEATURE_WORD: 5071 r = kvm_arch_get_supported_msr_feature(kvm_state, 5072 wi->msr.index); 5073 break; 5074 } 5075 } else if (hvf_enabled()) { 5076 if (wi->type != CPUID_FEATURE_WORD) { 5077 return 0; 5078 } 5079 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5080 wi->cpuid.ecx, 5081 wi->cpuid.reg); 5082 } else if (tcg_enabled()) { 5083 r = wi->tcg_features; 5084 } else { 5085 return ~0; 5086 } 5087 if (migratable_only) { 5088 r &= x86_cpu_get_migratable_flags(w); 5089 } 5090 return r; 5091 } 5092 5093 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5094 { 5095 PropValue *pv; 5096 for (pv = props; pv->prop; pv++) { 5097 if (!pv->value) { 5098 continue; 5099 } 5100 object_property_parse(OBJECT(cpu), pv->prop, pv->value, 5101 &error_abort); 5102 } 5103 } 5104 5105 /* Apply properties for the CPU model version specified in model */ 5106 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5107 { 5108 const X86CPUVersionDefinition *vdef; 5109 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5110 5111 if (version == CPU_VERSION_LEGACY) { 5112 return; 5113 } 5114 5115 for (vdef = 
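/*
 * Note on this loop: version property lists are cumulative. As a sketch,
 * assuming a model with versions 1..3 and a resolved version of 2:
 *
 *   apply props of v1;
 *   apply props of v2;   stop here (vdef->version == version)
 *   props of v3 are never applied
 *
 * so each version only needs to list what changed relative to the
 * previous one.
 */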
x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5116 PropValue *p; 5117 5118 for (p = vdef->props; p && p->prop; p++) { 5119 object_property_parse(OBJECT(cpu), p->prop, p->value, 5120 &error_abort); 5121 } 5122 5123 if (vdef->version == version) { 5124 break; 5125 } 5126 } 5127 5128 /* 5129 * If we reached the end of the list, version number was invalid 5130 */ 5131 assert(vdef->version == version); 5132 } 5133 5134 /* Load data from X86CPUDefinition into a X86CPU object 5135 */ 5136 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5137 { 5138 X86CPUDefinition *def = model->cpudef; 5139 CPUX86State *env = &cpu->env; 5140 const char *vendor; 5141 char host_vendor[CPUID_VENDOR_SZ + 1]; 5142 FeatureWord w; 5143 5144 /*NOTE: any property set by this function should be returned by 5145 * x86_cpu_static_props(), so static expansion of 5146 * query-cpu-model-expansion is always complete. 5147 */ 5148 5149 /* CPU models only set _minimum_ values for level/xlevel: */ 5150 object_property_set_uint(OBJECT(cpu), "min-level", def->level, 5151 &error_abort); 5152 object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel, 5153 &error_abort); 5154 5155 object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort); 5156 object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort); 5157 object_property_set_int(OBJECT(cpu), "stepping", def->stepping, 5158 &error_abort); 5159 object_property_set_str(OBJECT(cpu), "model-id", def->model_id, 5160 &error_abort); 5161 for (w = 0; w < FEATURE_WORDS; w++) { 5162 env->features[w] = def->features[w]; 5163 } 5164 5165 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5166 cpu->legacy_cache = !def->cache_info; 5167 5168 /* Special cases not set in the X86CPUDefinition structs: */ 5169 /* TODO: in-kernel irqchip for hvf */ 5170 if (kvm_enabled()) { 5171 if (!kvm_irqchip_in_kernel()) { 5172 x86_cpu_change_kvm_default("x2apic", "off"); 5173 } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) { 5174 x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); 5175 } 5176 5177 x86_cpu_apply_props(cpu, kvm_default_props); 5178 } else if (tcg_enabled()) { 5179 x86_cpu_apply_props(cpu, tcg_default_props); 5180 } 5181 5182 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5183 5184 /* sysenter isn't supported in compatibility mode on AMD, 5185 * syscall isn't supported in compatibility mode on Intel. 5186 * Normally we advertise the actual CPU vendor, but you can 5187 * override this using the 'vendor' property if you want to use 5188 * KVM's sysenter/syscall emulation in compatibility mode and 5189 * when doing cross vendor migration 5190 */ 5191 vendor = def->vendor; 5192 if (accel_uses_host_cpuid()) { 5193 uint32_t ebx = 0, ecx = 0, edx = 0; 5194 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5195 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5196 vendor = host_vendor; 5197 } 5198 5199 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 5200 5201 x86_cpu_apply_version_props(cpu, model); 5202 5203 /* 5204 * Properties in versioned CPU model are not user specified features. 5205 * We can simply clear env->user_features here since it will be filled later 5206 * in x86_cpu_expand_features() based on plus_features and minus_features. 5207 */ 5208 memset(&env->user_features, 0, sizeof(env->user_features)); 5209 } 5210 5211 #ifndef CONFIG_USER_ONLY 5212 /* Return a QDict containing keys for all properties that can be included 5213 * in static expansion of CPU models. 
All properties set by x86_cpu_load_model() 5214 * must be included in the dictionary. 5215 */ 5216 static QDict *x86_cpu_static_props(void) 5217 { 5218 FeatureWord w; 5219 int i; 5220 static const char *props[] = { 5221 "min-level", 5222 "min-xlevel", 5223 "family", 5224 "model", 5225 "stepping", 5226 "model-id", 5227 "vendor", 5228 "lmce", 5229 NULL, 5230 }; 5231 static QDict *d; 5232 5233 if (d) { 5234 return d; 5235 } 5236 5237 d = qdict_new(); 5238 for (i = 0; props[i]; i++) { 5239 qdict_put_null(d, props[i]); 5240 } 5241 5242 for (w = 0; w < FEATURE_WORDS; w++) { 5243 FeatureWordInfo *fi = &feature_word_info[w]; 5244 int bit; 5245 for (bit = 0; bit < 64; bit++) { 5246 if (!fi->feat_names[bit]) { 5247 continue; 5248 } 5249 qdict_put_null(d, fi->feat_names[bit]); 5250 } 5251 } 5252 5253 return d; 5254 } 5255 5256 /* Add an entry to @props dict, with the value for property. */ 5257 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5258 { 5259 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5260 &error_abort); 5261 5262 qdict_put_obj(props, prop, value); 5263 } 5264 5265 /* Convert CPU model data from X86CPU object to a property dictionary 5266 * that can recreate exactly the same CPU model. 5267 */ 5268 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5269 { 5270 QDict *sprops = x86_cpu_static_props(); 5271 const QDictEntry *e; 5272 5273 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5274 const char *prop = qdict_entry_key(e); 5275 x86_cpu_expand_prop(cpu, props, prop); 5276 } 5277 } 5278 5279 /* Convert CPU model data from X86CPU object to a property dictionary 5280 * that can recreate exactly the same CPU model, including every 5281 * writeable QOM property. 5282 */ 5283 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5284 { 5285 ObjectPropertyIterator iter; 5286 ObjectProperty *prop; 5287 5288 object_property_iter_init(&iter, OBJECT(cpu)); 5289 while ((prop = object_property_iter_next(&iter))) { 5290 /* skip read-only or write-only properties */ 5291 if (!prop->get || !prop->set) { 5292 continue; 5293 } 5294 5295 /* "hotplugged" is the only property that is configurable 5296 * on the command-line but will be set differently on CPUs 5297 * created using "-cpu ... -smp ..." and by CPUs created 5298 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5299 */ 5300 if (!strcmp(prop->name, "hotplugged")) { 5301 continue; 5302 } 5303 x86_cpu_expand_prop(cpu, props, prop->name); 5304 } 5305 } 5306 5307 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5308 { 5309 const QDictEntry *prop; 5310 5311 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5312 if (!object_property_set_qobject(obj, qdict_entry_key(prop), 5313 qdict_entry_value(prop), errp)) { 5314 break; 5315 } 5316 } 5317 } 5318 5319 /* Create X86CPU object according to model+props specification */ 5320 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5321 { 5322 X86CPU *xc = NULL; 5323 X86CPUClass *xcc; 5324 Error *err = NULL; 5325 5326 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5327 if (xcc == NULL) { 5328 error_setg(&err, "CPU model '%s' not found", model); 5329 goto out; 5330 } 5331 5332 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5333 if (props) { 5334 object_apply_props(OBJECT(xc), props, &err); 5335 if (err) { 5336 goto out; 5337 } 5338 } 5339 5340 x86_cpu_expand_features(xc, &err); 5341 if (err) { 5342 goto out; 5343 } 5344 5345 out: 5346 if (err) { 5347 error_propagate(errp, err); 5348 object_unref(OBJECT(xc)); 5349 xc = NULL; 5350 } 5351 return xc; 5352 } 5353 5354 CpuModelExpansionInfo * 5355 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5356 CpuModelInfo *model, 5357 Error **errp) 5358 { 5359 X86CPU *xc = NULL; 5360 Error *err = NULL; 5361 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5362 QDict *props = NULL; 5363 const char *base_name; 5364 5365 xc = x86_cpu_from_model(model->name, 5366 model->has_props ? 5367 qobject_to(QDict, model->props) : 5368 NULL, &err); 5369 if (err) { 5370 goto out; 5371 } 5372 5373 props = qdict_new(); 5374 ret->model = g_new0(CpuModelInfo, 1); 5375 ret->model->props = QOBJECT(props); 5376 ret->model->has_props = true; 5377 5378 switch (type) { 5379 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5380 /* Static expansion will be based on "base" only */ 5381 base_name = "base"; 5382 x86_cpu_to_dict(xc, props); 5383 break; 5384 case CPU_MODEL_EXPANSION_TYPE_FULL: 5385 /* As we don't return every single property, full expansion needs 5386 * to keep the original model name+props, and add extra 5387 * properties on top of that. 
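 * As a rough illustration (shapes only, property values made up): a
 * "full" expansion of { "name": "Skylake-Client" } comes back with the
 * same model name plus a "props" dictionary covering every writable
 * property, while a "static" expansion request always comes back as
 * { "name": "base", "props": { ... } }.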
5388 */ 5389 base_name = model->name; 5390 x86_cpu_to_dict_full(xc, props); 5391 break; 5392 default: 5393 error_setg(&err, "Unsupported expansion type"); 5394 goto out; 5395 } 5396 5397 x86_cpu_to_dict(xc, props); 5398 5399 ret->model->name = g_strdup(base_name); 5400 5401 out: 5402 object_unref(OBJECT(xc)); 5403 if (err) { 5404 error_propagate(errp, err); 5405 qapi_free_CpuModelExpansionInfo(ret); 5406 ret = NULL; 5407 } 5408 return ret; 5409 } 5410 #endif /* !CONFIG_USER_ONLY */ 5411 5412 static gchar *x86_gdb_arch_name(CPUState *cs) 5413 { 5414 #ifdef TARGET_X86_64 5415 return g_strdup("i386:x86-64"); 5416 #else 5417 return g_strdup("i386"); 5418 #endif 5419 } 5420 5421 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5422 { 5423 X86CPUModel *model = data; 5424 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5425 CPUClass *cc = CPU_CLASS(oc); 5426 5427 xcc->model = model; 5428 xcc->migration_safe = true; 5429 cc->deprecation_note = model->cpudef->deprecation_note; 5430 } 5431 5432 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5433 { 5434 g_autofree char *typename = x86_cpu_type_name(name); 5435 TypeInfo ti = { 5436 .name = typename, 5437 .parent = TYPE_X86_CPU, 5438 .class_init = x86_cpu_cpudef_class_init, 5439 .class_data = model, 5440 }; 5441 5442 type_register(&ti); 5443 } 5444 5445 static void x86_register_cpudef_types(X86CPUDefinition *def) 5446 { 5447 X86CPUModel *m; 5448 const X86CPUVersionDefinition *vdef; 5449 5450 /* AMD aliases are handled at runtime based on CPUID vendor, so 5451 * they shouldn't be set on the CPU model table. 5452 */ 5453 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5454 /* catch mistakes instead of silently truncating model_id when too long */ 5455 assert(def->model_id && strlen(def->model_id) <= 48); 5456 5457 /* Unversioned model: */ 5458 m = g_new0(X86CPUModel, 1); 5459 m->cpudef = def; 5460 m->version = CPU_VERSION_AUTO; 5461 m->is_alias = true; 5462 x86_register_cpu_model_type(def->name, m); 5463 5464 /* Versioned models: */ 5465 5466 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5467 X86CPUModel *m = g_new0(X86CPUModel, 1); 5468 g_autofree char *name = 5469 x86_cpu_versioned_model_name(def, vdef->version); 5470 m->cpudef = def; 5471 m->version = vdef->version; 5472 m->note = vdef->note; 5473 x86_register_cpu_model_type(name, m); 5474 5475 if (vdef->alias) { 5476 X86CPUModel *am = g_new0(X86CPUModel, 1); 5477 am->cpudef = def; 5478 am->version = vdef->version; 5479 am->is_alias = true; 5480 x86_register_cpu_model_type(vdef->alias, am); 5481 } 5482 } 5483 5484 } 5485 5486 #if !defined(CONFIG_USER_ONLY) 5487 5488 void cpu_clear_apic_feature(CPUX86State *env) 5489 { 5490 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5491 } 5492 5493 #endif /* !CONFIG_USER_ONLY */ 5494 5495 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5496 uint32_t *eax, uint32_t *ebx, 5497 uint32_t *ecx, uint32_t *edx) 5498 { 5499 X86CPU *cpu = env_archcpu(env); 5500 CPUState *cs = env_cpu(env); 5501 uint32_t die_offset; 5502 uint32_t limit; 5503 uint32_t signature[3]; 5504 X86CPUTopoInfo topo_info; 5505 5506 topo_info.dies_per_pkg = env->nr_dies; 5507 topo_info.cores_per_die = cs->nr_cores; 5508 topo_info.threads_per_core = cs->nr_threads; 5509 5510 /* Calculate & apply limits for different index ranges */ 5511 if (index >= 0xC0000000) { 5512 limit = env->cpuid_xlevel2; 5513 } else if (index >= 0x80000000) { 5514 limit = env->cpuid_xlevel; 5515 } else if (index >= 0x40000000) { 
5516 limit = 0x40000001; 5517 } else { 5518 limit = env->cpuid_level; 5519 } 5520 5521 if (index > limit) { 5522 /* Intel documentation states that invalid EAX input will 5523 * return the same information as EAX=cpuid_level 5524 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5525 */ 5526 index = env->cpuid_level; 5527 } 5528 5529 switch(index) { 5530 case 0: 5531 *eax = env->cpuid_level; 5532 *ebx = env->cpuid_vendor1; 5533 *edx = env->cpuid_vendor2; 5534 *ecx = env->cpuid_vendor3; 5535 break; 5536 case 1: 5537 *eax = env->cpuid_version; 5538 *ebx = (cpu->apic_id << 24) | 5539 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5540 *ecx = env->features[FEAT_1_ECX]; 5541 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5542 *ecx |= CPUID_EXT_OSXSAVE; 5543 } 5544 *edx = env->features[FEAT_1_EDX]; 5545 if (cs->nr_cores * cs->nr_threads > 1) { 5546 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5547 *edx |= CPUID_HT; 5548 } 5549 if (!cpu->enable_pmu) { 5550 *ecx &= ~CPUID_EXT_PDCM; 5551 } 5552 break; 5553 case 2: 5554 /* cache info: needed for Pentium Pro compatibility */ 5555 if (cpu->cache_info_passthrough) { 5556 host_cpuid(index, 0, eax, ebx, ecx, edx); 5557 break; 5558 } 5559 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5560 *ebx = 0; 5561 if (!cpu->enable_l3_cache) { 5562 *ecx = 0; 5563 } else { 5564 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5565 } 5566 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5567 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5568 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5569 break; 5570 case 4: 5571 /* cache info: needed for Core compatibility */ 5572 if (cpu->cache_info_passthrough) { 5573 host_cpuid(index, count, eax, ebx, ecx, edx); 5574 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
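 * (Bits 31:26 of CPUID[4].EAX encode the maximum number of addressable
 * core IDs in the package minus one, so e.g. an 8-core configuration is
 * re-encoded below as 7 << 26.)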
*/ 5575 *eax &= ~0xFC000000; 5576 if ((*eax & 31) && cs->nr_cores > 1) { 5577 *eax |= (cs->nr_cores - 1) << 26; 5578 } 5579 } else { 5580 *eax = 0; 5581 switch (count) { 5582 case 0: /* L1 dcache info */ 5583 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5584 1, cs->nr_cores, 5585 eax, ebx, ecx, edx); 5586 break; 5587 case 1: /* L1 icache info */ 5588 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5589 1, cs->nr_cores, 5590 eax, ebx, ecx, edx); 5591 break; 5592 case 2: /* L2 cache info */ 5593 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5594 cs->nr_threads, cs->nr_cores, 5595 eax, ebx, ecx, edx); 5596 break; 5597 case 3: /* L3 cache info */ 5598 die_offset = apicid_die_offset(&topo_info); 5599 if (cpu->enable_l3_cache) { 5600 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5601 (1 << die_offset), cs->nr_cores, 5602 eax, ebx, ecx, edx); 5603 break; 5604 } 5605 /* fall through */ 5606 default: /* end of info */ 5607 *eax = *ebx = *ecx = *edx = 0; 5608 break; 5609 } 5610 } 5611 break; 5612 case 5: 5613 /* MONITOR/MWAIT Leaf */ 5614 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5615 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5616 *ecx = cpu->mwait.ecx; /* flags */ 5617 *edx = cpu->mwait.edx; /* mwait substates */ 5618 break; 5619 case 6: 5620 /* Thermal and Power Leaf */ 5621 *eax = env->features[FEAT_6_EAX]; 5622 *ebx = 0; 5623 *ecx = 0; 5624 *edx = 0; 5625 break; 5626 case 7: 5627 /* Structured Extended Feature Flags Enumeration Leaf */ 5628 if (count == 0) { 5629 /* Maximum ECX value for sub-leaves */ 5630 *eax = env->cpuid_level_func7; 5631 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5632 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5633 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5634 *ecx |= CPUID_7_0_ECX_OSPKE; 5635 } 5636 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5637 } else if (count == 1) { 5638 *eax = env->features[FEAT_7_1_EAX]; 5639 *ebx = 0; 5640 *ecx = 0; 5641 *edx = 0; 5642 } else { 5643 *eax = 0; 5644 *ebx = 0; 5645 *ecx = 0; 5646 *edx = 0; 5647 } 5648 break; 5649 case 9: 5650 /* Direct Cache Access Information Leaf */ 5651 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5652 *ebx = 0; 5653 *ecx = 0; 5654 *edx = 0; 5655 break; 5656 case 0xA: 5657 /* Architectural Performance Monitoring Leaf */ 5658 if (kvm_enabled() && cpu->enable_pmu) { 5659 KVMState *s = cs->kvm_state; 5660 5661 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5662 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5663 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5664 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5665 } else if (hvf_enabled() && cpu->enable_pmu) { 5666 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5667 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5668 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5669 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5670 } else { 5671 *eax = 0; 5672 *ebx = 0; 5673 *ecx = 0; 5674 *edx = 0; 5675 } 5676 break; 5677 case 0xB: 5678 /* Extended Topology Enumeration Leaf */ 5679 if (!cpu->enable_cpuid_0xb) { 5680 *eax = *ebx = *ecx = *edx = 0; 5681 break; 5682 } 5683 5684 *ecx = count & 0xff; 5685 *edx = cpu->apic_id; 5686 5687 switch (count) { 5688 case 0: 5689 *eax = apicid_core_offset(&topo_info); 5690 *ebx = cs->nr_threads; 5691 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5692 break; 5693 case 1: 5694 *eax = apicid_pkg_offset(&topo_info); 5695 *ebx = cs->nr_cores * cs->nr_threads; 5696 *ecx 
|= CPUID_TOPOLOGY_LEVEL_CORE; 5697 break; 5698 default: 5699 *eax = 0; 5700 *ebx = 0; 5701 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5702 } 5703 5704 assert(!(*eax & ~0x1f)); 5705 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5706 break; 5707 case 0x1F: 5708 /* V2 Extended Topology Enumeration Leaf */ 5709 if (env->nr_dies < 2) { 5710 *eax = *ebx = *ecx = *edx = 0; 5711 break; 5712 } 5713 5714 *ecx = count & 0xff; 5715 *edx = cpu->apic_id; 5716 switch (count) { 5717 case 0: 5718 *eax = apicid_core_offset(&topo_info); 5719 *ebx = cs->nr_threads; 5720 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5721 break; 5722 case 1: 5723 *eax = apicid_die_offset(&topo_info); 5724 *ebx = cs->nr_cores * cs->nr_threads; 5725 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5726 break; 5727 case 2: 5728 *eax = apicid_pkg_offset(&topo_info); 5729 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5730 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5731 break; 5732 default: 5733 *eax = 0; 5734 *ebx = 0; 5735 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5736 } 5737 assert(!(*eax & ~0x1f)); 5738 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5739 break; 5740 case 0xD: { 5741 /* Processor Extended State */ 5742 *eax = 0; 5743 *ebx = 0; 5744 *ecx = 0; 5745 *edx = 0; 5746 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5747 break; 5748 } 5749 5750 if (count == 0) { 5751 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5752 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5753 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5754 /* 5755 * The initial values of xcr0 and ebx are both 0. On a host kernel 5756 * without kvm commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even after 5757 * the guest updates xcr0, which crashes some legacy guests 5758 * (e.g., CentOS 6). So set ebx == ecx to work around it. 5759 */ 5760 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5761 } else if (count == 1) { 5762 *eax = env->features[FEAT_XSAVE]; 5763 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5764 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5765 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5766 *eax = esa->size; 5767 *ebx = esa->offset; 5768 } 5769 } 5770 break; 5771 } 5772 case 0x14: { 5773 /* Intel Processor Trace Enumeration */ 5774 *eax = 0; 5775 *ebx = 0; 5776 *ecx = 0; 5777 *edx = 0; 5778 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5779 !kvm_enabled()) { 5780 break; 5781 } 5782 5783 if (count == 0) { 5784 *eax = INTEL_PT_MAX_SUBLEAF; 5785 *ebx = INTEL_PT_MINIMAL_EBX; 5786 *ecx = INTEL_PT_MINIMAL_ECX; 5787 if (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP) { 5788 *ecx |= CPUID_14_0_ECX_LIP; 5789 } 5790 } else if (count == 1) { 5791 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5792 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5793 } 5794 break; 5795 } 5796 case 0x40000000: 5797 /* 5798 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5799 * set here, but we restrict to TCG nonetheless. 
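 * As a concrete illustration of the encoding below: the 12-byte
 * "TCGTCGTCGTCG" signature is copied into EBX/ECX/EDX, so on a
 * little-endian host a guest reading this leaf sees EBX == 0x54474354
 * ('T','C','G','T') and can match the three registers against the string
 * to detect that it is running under TCG.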
5800 */ 5801 if (tcg_enabled() && cpu->expose_tcg) { 5802 memcpy(signature, "TCGTCGTCGTCG", 12); 5803 *eax = 0x40000001; 5804 *ebx = signature[0]; 5805 *ecx = signature[1]; 5806 *edx = signature[2]; 5807 } else { 5808 *eax = 0; 5809 *ebx = 0; 5810 *ecx = 0; 5811 *edx = 0; 5812 } 5813 break; 5814 case 0x40000001: 5815 *eax = 0; 5816 *ebx = 0; 5817 *ecx = 0; 5818 *edx = 0; 5819 break; 5820 case 0x80000000: 5821 *eax = env->cpuid_xlevel; 5822 *ebx = env->cpuid_vendor1; 5823 *edx = env->cpuid_vendor2; 5824 *ecx = env->cpuid_vendor3; 5825 break; 5826 case 0x80000001: 5827 *eax = env->cpuid_version; 5828 *ebx = 0; 5829 *ecx = env->features[FEAT_8000_0001_ECX]; 5830 *edx = env->features[FEAT_8000_0001_EDX]; 5831 5832 /* The Linux kernel checks for the CMPLegacy bit and 5833 * discards multiple thread information if it is set. 5834 * So don't set it here for Intel to make Linux guests happy. 5835 */ 5836 if (cs->nr_cores * cs->nr_threads > 1) { 5837 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5838 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5839 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5840 *ecx |= 1 << 1; /* CmpLegacy bit */ 5841 } 5842 } 5843 break; 5844 case 0x80000002: 5845 case 0x80000003: 5846 case 0x80000004: 5847 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5848 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5849 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5850 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5851 break; 5852 case 0x80000005: 5853 /* cache info (L1 cache) */ 5854 if (cpu->cache_info_passthrough) { 5855 host_cpuid(index, 0, eax, ebx, ecx, edx); 5856 break; 5857 } 5858 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5859 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5860 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5861 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5862 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5863 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5864 break; 5865 case 0x80000006: 5866 /* cache info (L2 cache) */ 5867 if (cpu->cache_info_passthrough) { 5868 host_cpuid(index, 0, eax, ebx, ecx, edx); 5869 break; 5870 } 5871 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5872 (L2_DTLB_2M_ENTRIES << 16) | 5873 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5874 (L2_ITLB_2M_ENTRIES); 5875 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5876 (L2_DTLB_4K_ENTRIES << 16) | 5877 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5878 (L2_ITLB_4K_ENTRIES); 5879 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5880 cpu->enable_l3_cache ? 5881 env->cache_info_amd.l3_cache : NULL, 5882 ecx, edx); 5883 break; 5884 case 0x80000007: 5885 *eax = 0; 5886 *ebx = 0; 5887 *ecx = 0; 5888 *edx = env->features[FEAT_8000_0007_EDX]; 5889 break; 5890 case 0x80000008: 5891 /* virtual & phys address size in low 2 bytes. */ 5892 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5893 /* 64 bit processor */ 5894 *eax = cpu->phys_bits; /* configurable physical bits */ 5895 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5896 *eax |= 0x00003900; /* 57 bits virtual */ 5897 } else { 5898 *eax |= 0x00003000; /* 48 bits virtual */ 5899 } 5900 } else { 5901 *eax = cpu->phys_bits; 5902 } 5903 *ebx = env->features[FEAT_8000_0008_EBX]; 5904 if (cs->nr_cores * cs->nr_threads > 1) { 5905 /* 5906 * Bits 15:12 is "The number of bits in the initial 5907 * Core::X86::Apic::ApicId[ApicId] value that indicate 5908 * thread ID within a package". 
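 * For instance, a hypothetical 1-die, 4-core, 2-thread configuration has
 * apicid_pkg_offset() == 3, so bits 15:12 would hold the value 3.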
5909 * Bits 7:0 is "The number of threads in the package is NC+1" 5910 */ 5911 *ecx = (apicid_pkg_offset(&topo_info) << 12) | 5912 ((cs->nr_cores * cs->nr_threads) - 1); 5913 } else { 5914 *ecx = 0; 5915 } 5916 *edx = 0; 5917 break; 5918 case 0x8000000A: 5919 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5920 *eax = 0x00000001; /* SVM Revision */ 5921 *ebx = 0x00000010; /* nr of ASIDs */ 5922 *ecx = 0; 5923 *edx = env->features[FEAT_SVM]; /* optional features */ 5924 } else { 5925 *eax = 0; 5926 *ebx = 0; 5927 *ecx = 0; 5928 *edx = 0; 5929 } 5930 break; 5931 case 0x8000001D: 5932 *eax = 0; 5933 if (cpu->cache_info_passthrough) { 5934 host_cpuid(index, count, eax, ebx, ecx, edx); 5935 break; 5936 } 5937 switch (count) { 5938 case 0: /* L1 dcache info */ 5939 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5940 &topo_info, eax, ebx, ecx, edx); 5941 break; 5942 case 1: /* L1 icache info */ 5943 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5944 &topo_info, eax, ebx, ecx, edx); 5945 break; 5946 case 2: /* L2 cache info */ 5947 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5948 &topo_info, eax, ebx, ecx, edx); 5949 break; 5950 case 3: /* L3 cache info */ 5951 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5952 &topo_info, eax, ebx, ecx, edx); 5953 break; 5954 default: /* end of info */ 5955 *eax = *ebx = *ecx = *edx = 0; 5956 break; 5957 } 5958 break; 5959 case 0x8000001E: 5960 if (cpu->core_id <= 255) { 5961 encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx); 5962 } else { 5963 *eax = 0; 5964 *ebx = 0; 5965 *ecx = 0; 5966 *edx = 0; 5967 } 5968 break; 5969 case 0xC0000000: 5970 *eax = env->cpuid_xlevel2; 5971 *ebx = 0; 5972 *ecx = 0; 5973 *edx = 0; 5974 break; 5975 case 0xC0000001: 5976 /* Support for VIA CPU's CPUID instruction */ 5977 *eax = env->cpuid_version; 5978 *ebx = 0; 5979 *ecx = 0; 5980 *edx = env->features[FEAT_C000_0001_EDX]; 5981 break; 5982 case 0xC0000002: 5983 case 0xC0000003: 5984 case 0xC0000004: 5985 /* Reserved for the future, and now filled with zero */ 5986 *eax = 0; 5987 *ebx = 0; 5988 *ecx = 0; 5989 *edx = 0; 5990 break; 5991 case 0x8000001F: 5992 *eax = sev_enabled() ? 
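/*
 * Encoding sketch for this SEV leaf, matching the assignments below:
 * EAX bit 1 advertises SEV, EBX bits 5:0 carry the C-bit position, and
 * the bits starting at 6 give the number of physical address bits lost
 * to memory encryption. For example (hypothetical values), a C-bit at
 * position 47 with one reduced bit would give EBX == (1 << 6) | 47.
 */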
0x2 : 0; 5993 *ebx = sev_get_cbit_position(); 5994 *ebx |= sev_get_reduced_phys_bits() << 6; 5995 *ecx = 0; 5996 *edx = 0; 5997 break; 5998 default: 5999 /* reserved values: zero */ 6000 *eax = 0; 6001 *ebx = 0; 6002 *ecx = 0; 6003 *edx = 0; 6004 break; 6005 } 6006 } 6007 6008 static void x86_cpu_reset(DeviceState *dev) 6009 { 6010 CPUState *s = CPU(dev); 6011 X86CPU *cpu = X86_CPU(s); 6012 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 6013 CPUX86State *env = &cpu->env; 6014 target_ulong cr4; 6015 uint64_t xcr0; 6016 int i; 6017 6018 xcc->parent_reset(dev); 6019 6020 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 6021 6022 env->old_exception = -1; 6023 6024 /* init to reset state */ 6025 6026 env->hflags2 |= HF2_GIF_MASK; 6027 env->hflags &= ~HF_GUEST_MASK; 6028 6029 cpu_x86_update_cr0(env, 0x60000010); 6030 env->a20_mask = ~0x0; 6031 env->smbase = 0x30000; 6032 env->msr_smi_count = 0; 6033 6034 env->idt.limit = 0xffff; 6035 env->gdt.limit = 0xffff; 6036 env->ldt.limit = 0xffff; 6037 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6038 env->tr.limit = 0xffff; 6039 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6040 6041 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6042 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6043 DESC_R_MASK | DESC_A_MASK); 6044 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6045 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6046 DESC_A_MASK); 6047 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6048 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6049 DESC_A_MASK); 6050 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6051 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6052 DESC_A_MASK); 6053 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6054 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6055 DESC_A_MASK); 6056 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6057 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6058 DESC_A_MASK); 6059 6060 env->eip = 0xfff0; 6061 env->regs[R_EDX] = env->cpuid_version; 6062 6063 env->eflags = 0x2; 6064 6065 /* FPU init */ 6066 for (i = 0; i < 8; i++) { 6067 env->fptags[i] = 1; 6068 } 6069 cpu_set_fpuc(env, 0x37f); 6070 6071 env->mxcsr = 0x1f80; 6072 /* All units are in INIT state. */ 6073 env->xstate_bv = 0; 6074 6075 env->pat = 0x0007040600070406ULL; 6076 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6077 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6078 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6079 } 6080 6081 memset(env->dr, 0, sizeof(env->dr)); 6082 env->dr[6] = DR6_FIXED_1; 6083 env->dr[7] = DR7_FIXED_1; 6084 cpu_breakpoint_remove_all(s, BP_CPU); 6085 cpu_watchpoint_remove_all(s, BP_CPU); 6086 6087 cr4 = 0; 6088 xcr0 = XSTATE_FP_MASK; 6089 6090 #ifdef CONFIG_USER_ONLY 6091 /* Enable all the features for user-mode. */ 6092 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6093 xcr0 |= XSTATE_SSE_MASK; 6094 } 6095 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6096 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6097 if (env->features[esa->feature] & esa->bits) { 6098 xcr0 |= 1ull << i; 6099 } 6100 } 6101 6102 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6103 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6104 } 6105 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6106 cr4 |= CR4_FSGSBASE_MASK; 6107 } 6108 #endif 6109 6110 env->xcr0 = xcr0; 6111 cpu_x86_update_cr4(env, cr4); 6112 6113 /* 6114 * SDM 11.11.5 requires: 6115 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6116 * - IA32_MTRR_PHYSMASKn.V = 0 6117 * All other bits are undefined. For simplification, zero it all. 
6118 */ 6119 env->mtrr_deftype = 0; 6120 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6121 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6122 6123 env->interrupt_injected = -1; 6124 env->exception_nr = -1; 6125 env->exception_pending = 0; 6126 env->exception_injected = 0; 6127 env->exception_has_payload = false; 6128 env->exception_payload = 0; 6129 env->nmi_injected = false; 6130 #if !defined(CONFIG_USER_ONLY) 6131 /* We hard-wire the BSP to the first CPU. */ 6132 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6133 6134 s->halted = !cpu_is_bsp(cpu); 6135 6136 if (kvm_enabled()) { 6137 kvm_arch_reset_vcpu(cpu); 6138 } 6139 #endif 6140 } 6141 6142 #ifndef CONFIG_USER_ONLY 6143 bool cpu_is_bsp(X86CPU *cpu) 6144 { 6145 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6146 } 6147 6148 /* TODO: remove me, when reset over QOM tree is implemented */ 6149 static void x86_cpu_machine_reset_cb(void *opaque) 6150 { 6151 X86CPU *cpu = opaque; 6152 cpu_reset(CPU(cpu)); 6153 } 6154 #endif 6155 6156 static void mce_init(X86CPU *cpu) 6157 { 6158 CPUX86State *cenv = &cpu->env; 6159 unsigned int bank; 6160 6161 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6162 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6163 (CPUID_MCE | CPUID_MCA)) { 6164 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6165 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6166 cenv->mcg_ctl = ~(uint64_t)0; 6167 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6168 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6169 } 6170 } 6171 } 6172 6173 #ifndef CONFIG_USER_ONLY 6174 APICCommonClass *apic_get_class(void) 6175 { 6176 const char *apic_type = "apic"; 6177 6178 /* TODO: in-kernel irqchip for hvf */ 6179 if (kvm_apic_in_kernel()) { 6180 apic_type = "kvm-apic"; 6181 } else if (xen_enabled()) { 6182 apic_type = "xen-apic"; 6183 } else if (whpx_apic_in_platform()) { 6184 apic_type = "whpx-apic"; 6185 } 6186 6187 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6188 } 6189 6190 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6191 { 6192 APICCommonState *apic; 6193 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6194 6195 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6196 6197 object_property_add_child(OBJECT(cpu), "lapic", 6198 OBJECT(cpu->apic_state)); 6199 object_unref(OBJECT(cpu->apic_state)); 6200 6201 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6202 /* TODO: convert to link<> */ 6203 apic = APIC_COMMON(cpu->apic_state); 6204 apic->cpu = cpu; 6205 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6206 } 6207 6208 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6209 { 6210 APICCommonState *apic; 6211 static bool apic_mmio_map_once; 6212 6213 if (cpu->apic_state == NULL) { 6214 return; 6215 } 6216 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6217 6218 /* Map APIC MMIO area */ 6219 apic = APIC_COMMON(cpu->apic_state); 6220 if (!apic_mmio_map_once) { 6221 memory_region_add_subregion_overlap(get_system_memory(), 6222 apic->apicbase & 6223 MSR_IA32_APICBASE_BASE, 6224 &apic->io_memory, 6225 0x1000); 6226 apic_mmio_map_once = true; 6227 } 6228 } 6229 6230 static void x86_cpu_machine_done(Notifier *n, void *unused) 6231 { 6232 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6233 MemoryRegion *smram = 6234 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6235 6236 if (smram) { 6237 cpu->smram = g_new(MemoryRegion, 1); 6238 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6239 smram, 0, 4 * GiB); 
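/*
 * Rough sketch of the per-CPU region tree this builds up (TCG with
 * SMRAM present). The SMRAM alias is added just below at a higher
 * priority than the cpu_as_mem alias set up at realize time, so it wins
 * inside the "cpu-smm" address space while "cpu-memory" keeps seeing
 * plain system memory:
 *
 *   cpu_as_root
 *     +- smram alias -> /machine/smram      (priority 1)
 *     +- cpu_as_mem alias -> system memory  (priority 0)
 */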
6240 memory_region_set_enabled(cpu->smram, true); 6241 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6242 } 6243 } 6244 #else 6245 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6246 { 6247 } 6248 #endif 6249 6250 /* Note: Only safe for use on x86(-64) hosts */ 6251 static uint32_t x86_host_phys_bits(void) 6252 { 6253 uint32_t eax; 6254 uint32_t host_phys_bits; 6255 6256 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6257 if (eax >= 0x80000008) { 6258 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6259 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6260 * at 23:16 that can specify a maximum physical address bits for 6261 * the guest that can override this value; but I've not seen 6262 * anything with that set. 6263 */ 6264 host_phys_bits = eax & 0xff; 6265 } else { 6266 /* It's an odd 64 bit machine that doesn't have the leaf for 6267 * physical address bits; fall back to 36 that's most older 6268 * Intel. 6269 */ 6270 host_phys_bits = 36; 6271 } 6272 6273 return host_phys_bits; 6274 } 6275 6276 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6277 { 6278 if (*min < value) { 6279 *min = value; 6280 } 6281 } 6282 6283 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6284 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6285 { 6286 CPUX86State *env = &cpu->env; 6287 FeatureWordInfo *fi = &feature_word_info[w]; 6288 uint32_t eax = fi->cpuid.eax; 6289 uint32_t region = eax & 0xF0000000; 6290 6291 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6292 if (!env->features[w]) { 6293 return; 6294 } 6295 6296 switch (region) { 6297 case 0x00000000: 6298 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6299 break; 6300 case 0x80000000: 6301 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6302 break; 6303 case 0xC0000000: 6304 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6305 break; 6306 } 6307 6308 if (eax == 7) { 6309 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6310 fi->cpuid.ecx); 6311 } 6312 } 6313 6314 /* Calculate XSAVE components based on the configured CPU feature flags */ 6315 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6316 { 6317 CPUX86State *env = &cpu->env; 6318 int i; 6319 uint64_t mask; 6320 6321 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6322 env->features[FEAT_XSAVE_COMP_LO] = 0; 6323 env->features[FEAT_XSAVE_COMP_HI] = 0; 6324 return; 6325 } 6326 6327 mask = 0; 6328 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6329 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6330 if (env->features[esa->feature] & esa->bits) { 6331 mask |= (1ULL << i); 6332 } 6333 } 6334 6335 env->features[FEAT_XSAVE_COMP_LO] = mask; 6336 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6337 } 6338 6339 /***** Steps involved on loading and filtering CPUID data 6340 * 6341 * When initializing and realizing a CPU object, the steps 6342 * involved in setting up CPUID data are: 6343 * 6344 * 1) Loading CPU model definition (X86CPUDefinition). This is 6345 * implemented by x86_cpu_load_model() and should be completely 6346 * transparent, as it is done automatically by instance_init. 6347 * No code should need to look at X86CPUDefinition structs 6348 * outside instance_init. 6349 * 6350 * 2) CPU expansion. This is done by realize before CPUID 6351 * filtering, and will make sure host/accelerator data is 6352 * loaded for CPU models that depend on host capabilities 6353 * (e.g. "host"). 
Done by x86_cpu_expand_features(). 6354 * 6355 * 3) CPUID filtering. This initializes extra data related to 6356 * CPUID, and checks if the host supports all capabilities 6357 * required by the CPU. Runnability of a CPU model is 6358 * determined at this step. Done by x86_cpu_filter_features(). 6359 * 6360 * Some operations don't require all steps to be performed. 6361 * More precisely: 6362 * 6363 * - CPU instance creation (instance_init) will run only CPU 6364 * model loading. CPU expansion can't run at instance_init-time 6365 * because host/accelerator data may be not available yet. 6366 * - CPU realization will perform both CPU model expansion and CPUID 6367 * filtering, and return an error in case one of them fails. 6368 * - query-cpu-definitions needs to run all 3 steps. It needs 6369 * to run CPUID filtering, as the 'unavailable-features' 6370 * field is set based on the filtering results. 6371 * - The query-cpu-model-expansion QMP command only needs to run 6372 * CPU model loading and CPU expansion. It should not filter 6373 * any CPUID data based on host capabilities. 6374 */ 6375 6376 /* Expand CPU configuration data, based on configured features 6377 * and host/accelerator capabilities when appropriate. 6378 */ 6379 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6380 { 6381 CPUX86State *env = &cpu->env; 6382 FeatureWord w; 6383 int i; 6384 GList *l; 6385 6386 for (l = plus_features; l; l = l->next) { 6387 const char *prop = l->data; 6388 if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) { 6389 return; 6390 } 6391 } 6392 6393 for (l = minus_features; l; l = l->next) { 6394 const char *prop = l->data; 6395 if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) { 6396 return; 6397 } 6398 } 6399 6400 /*TODO: Now cpu->max_features doesn't overwrite features 6401 * set using QOM properties, and we can convert 6402 * plus_features & minus_features to global properties 6403 * inside x86_cpu_parse_featurestr() too. 6404 */ 6405 if (cpu->max_features) { 6406 for (w = 0; w < FEATURE_WORDS; w++) { 6407 /* Override only features that weren't set explicitly 6408 * by the user. 6409 */ 6410 env->features[w] |= 6411 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6412 ~env->user_features[w] & 6413 ~feature_word_info[w].no_autoenable_flags; 6414 } 6415 } 6416 6417 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6418 FeatureDep *d = &feature_dependencies[i]; 6419 if (!(env->features[d->from.index] & d->from.mask)) { 6420 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6421 6422 /* Not an error unless the dependent feature was added explicitly. 
*/ 6423 mark_unavailable_features(cpu, d->to.index, 6424 unavailable_features & env->user_features[d->to.index], 6425 "This feature depends on other features that were not requested"); 6426 6427 env->features[d->to.index] &= ~unavailable_features; 6428 } 6429 } 6430 6431 if (!kvm_enabled() || !cpu->expose_kvm) { 6432 env->features[FEAT_KVM] = 0; 6433 } 6434 6435 x86_cpu_enable_xsave_components(cpu); 6436 6437 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6438 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6439 if (cpu->full_cpuid_auto_level) { 6440 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6441 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6442 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6443 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6444 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6445 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6446 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6447 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6448 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6449 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6450 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6451 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6452 6453 /* Intel Processor Trace requires CPUID[0x14] */ 6454 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6455 if (cpu->intel_pt_auto_level) { 6456 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6457 } else if (cpu->env.cpuid_min_level < 0x14) { 6458 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6459 CPUID_7_0_EBX_INTEL_PT, 6460 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,min-level=0x14\""); 6461 } 6462 } 6463 6464 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6465 if (env->nr_dies > 1) { 6466 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6467 } 6468 6469 /* SVM requires CPUID[0x8000000A] */ 6470 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6471 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6472 } 6473 6474 /* SEV requires CPUID[0x8000001F] */ 6475 if (sev_enabled()) { 6476 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6477 } 6478 } 6479 6480 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6481 if (env->cpuid_level_func7 == UINT32_MAX) { 6482 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6483 } 6484 if (env->cpuid_level == UINT32_MAX) { 6485 env->cpuid_level = env->cpuid_min_level; 6486 } 6487 if (env->cpuid_xlevel == UINT32_MAX) { 6488 env->cpuid_xlevel = env->cpuid_min_xlevel; 6489 } 6490 if (env->cpuid_xlevel2 == UINT32_MAX) { 6491 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6492 } 6493 } 6494 6495 /* 6496 * Finishes initialization of CPUID data, filters CPU feature 6497 * words based on host availability of each feature. 6498 * 6499 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6500 */ 6501 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6502 { 6503 CPUX86State *env = &cpu->env; 6504 FeatureWord w; 6505 const char *prefix = NULL; 6506 6507 if (verbose) { 6508 prefix = accel_uses_host_cpuid() 6509 ? 
"host doesn't support requested feature" 6510 : "TCG doesn't support requested feature"; 6511 } 6512 6513 for (w = 0; w < FEATURE_WORDS; w++) { 6514 uint64_t host_feat = 6515 x86_cpu_get_supported_feature_word(w, false); 6516 uint64_t requested_features = env->features[w]; 6517 uint64_t unavailable_features = requested_features & ~host_feat; 6518 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6519 } 6520 6521 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6522 kvm_enabled()) { 6523 KVMState *s = CPU(cpu)->kvm_state; 6524 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6525 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6526 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6527 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6528 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6529 6530 if (!eax_0 || 6531 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6532 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6533 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6534 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6535 INTEL_PT_ADDR_RANGES_NUM) || 6536 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6537 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6538 ((ecx_0 & CPUID_14_0_ECX_LIP) != 6539 (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP))) { 6540 /* 6541 * Processor Trace capabilities aren't configurable, so if the 6542 * host can't emulate the capabilities we report on 6543 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6544 */ 6545 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6546 } 6547 } 6548 } 6549 6550 static void x86_cpu_hyperv_realize(X86CPU *cpu) 6551 { 6552 size_t len; 6553 6554 /* Hyper-V vendor id */ 6555 if (!cpu->hyperv_vendor) { 6556 memcpy(cpu->hyperv_vendor_id, "Microsoft Hv", 12); 6557 } else { 6558 len = strlen(cpu->hyperv_vendor); 6559 6560 if (len > 12) { 6561 warn_report("hv-vendor-id truncated to 12 characters"); 6562 len = 12; 6563 } 6564 memset(cpu->hyperv_vendor_id, 0, 12); 6565 memcpy(cpu->hyperv_vendor_id, cpu->hyperv_vendor, len); 6566 } 6567 6568 /* 'Hv#1' interface identification*/ 6569 cpu->hyperv_interface_id[0] = 0x31237648; 6570 cpu->hyperv_interface_id[1] = 0; 6571 cpu->hyperv_interface_id[2] = 0; 6572 cpu->hyperv_interface_id[3] = 0; 6573 6574 /* Hypervisor system identity */ 6575 cpu->hyperv_version_id[0] = 0x00001bbc; 6576 cpu->hyperv_version_id[1] = 0x00060001; 6577 6578 /* Hypervisor implementation limits */ 6579 cpu->hyperv_limits[0] = 64; 6580 cpu->hyperv_limits[1] = 0; 6581 cpu->hyperv_limits[2] = 0; 6582 } 6583 6584 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6585 { 6586 CPUState *cs = CPU(dev); 6587 X86CPU *cpu = X86_CPU(dev); 6588 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6589 CPUX86State *env = &cpu->env; 6590 Error *local_err = NULL; 6591 static bool ht_warned; 6592 6593 if (xcc->host_cpuid_required) { 6594 if (!accel_uses_host_cpuid()) { 6595 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6596 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6597 goto out; 6598 } 6599 } 6600 6601 if (cpu->max_features && accel_uses_host_cpuid()) { 6602 if (enable_cpu_pm) { 6603 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6604 &cpu->mwait.ecx, &cpu->mwait.edx); 6605 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6606 if (kvm_enabled() && kvm_has_waitpkg()) { 6607 env->features[FEAT_7_0_ECX] |= 
CPUID_7_0_ECX_WAITPKG; 6608 } 6609 } 6610 if (kvm_enabled() && cpu->ucode_rev == 0) { 6611 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6612 MSR_IA32_UCODE_REV); 6613 } 6614 } 6615 6616 if (cpu->ucode_rev == 0) { 6617 /* The default is the same as KVM's. */ 6618 if (IS_AMD_CPU(env)) { 6619 cpu->ucode_rev = 0x01000065; 6620 } else { 6621 cpu->ucode_rev = 0x100000000ULL; 6622 } 6623 } 6624 6625 /* mwait extended info: needed for Core compatibility */ 6626 /* We always wake on interrupt even if host does not have the capability */ 6627 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6628 6629 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6630 error_setg(errp, "apic-id property was not initialized properly"); 6631 return; 6632 } 6633 6634 x86_cpu_expand_features(cpu, &local_err); 6635 if (local_err) { 6636 goto out; 6637 } 6638 6639 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6640 6641 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6642 error_setg(&local_err, 6643 accel_uses_host_cpuid() ? 6644 "Host doesn't support requested features" : 6645 "TCG doesn't support requested features"); 6646 goto out; 6647 } 6648 6649 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6650 * CPUID[1].EDX. 6651 */ 6652 if (IS_AMD_CPU(env)) { 6653 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6654 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6655 & CPUID_EXT2_AMD_ALIASES); 6656 } 6657 6658 /* For 64bit systems think about the number of physical bits to present. 6659 * ideally this should be the same as the host; anything other than matching 6660 * the host can cause incorrect guest behaviour. 6661 * QEMU used to pick the magic value of 40 bits that corresponds to 6662 * consumer AMD devices but nothing else. 6663 */ 6664 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6665 if (accel_uses_host_cpuid()) { 6666 uint32_t host_phys_bits = x86_host_phys_bits(); 6667 static bool warned; 6668 6669 /* Print a warning if the user set it to a value that's not the 6670 * host value. 6671 */ 6672 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6673 !warned) { 6674 warn_report("Host physical bits (%u)" 6675 " does not match phys-bits property (%u)", 6676 host_phys_bits, cpu->phys_bits); 6677 warned = true; 6678 } 6679 6680 if (cpu->host_phys_bits) { 6681 /* The user asked for us to use the host physical bits */ 6682 cpu->phys_bits = host_phys_bits; 6683 if (cpu->host_phys_bits_limit && 6684 cpu->phys_bits > cpu->host_phys_bits_limit) { 6685 cpu->phys_bits = cpu->host_phys_bits_limit; 6686 } 6687 } 6688 6689 if (cpu->phys_bits && 6690 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6691 cpu->phys_bits < 32)) { 6692 error_setg(errp, "phys-bits should be between 32 and %u " 6693 " (but is %u)", 6694 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6695 return; 6696 } 6697 } else { 6698 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6699 error_setg(errp, "TCG only supports phys-bits=%u", 6700 TCG_PHYS_ADDR_BITS); 6701 return; 6702 } 6703 } 6704 /* 0 means it was not explicitly set by the user (or by machine 6705 * compat_props or by the host code above). In this case, the default 6706 * is the value used by TCG (40). 6707 */ 6708 if (cpu->phys_bits == 0) { 6709 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6710 } 6711 } else { 6712 /* For 32 bit systems don't use the user set value, but keep 6713 * phys_bits consistent with what we tell the guest. 
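 * (As the code just below shows: a 32-bit guest that advertises PSE36 in
 * CPUID[1].EDX is given 36 physical address bits, all others get 32.)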
6714 */ 6715 if (cpu->phys_bits != 0) { 6716 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 6717 return; 6718 } 6719 6720 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 6721 cpu->phys_bits = 36; 6722 } else { 6723 cpu->phys_bits = 32; 6724 } 6725 } 6726 6727 /* Cache information initialization */ 6728 if (!cpu->legacy_cache) { 6729 if (!xcc->model || !xcc->model->cpudef->cache_info) { 6730 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6731 error_setg(errp, 6732 "CPU model '%s' doesn't support legacy-cache=off", name); 6733 return; 6734 } 6735 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = 6736 *xcc->model->cpudef->cache_info; 6737 } else { 6738 /* Build legacy cache information */ 6739 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; 6740 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; 6741 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; 6742 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; 6743 6744 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; 6745 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; 6746 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; 6747 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; 6748 6749 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; 6750 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; 6751 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; 6752 env->cache_info_amd.l3_cache = &legacy_l3_cache; 6753 } 6754 6755 /* Process Hyper-V enlightenments */ 6756 x86_cpu_hyperv_realize(cpu); 6757 6758 cpu_exec_realizefn(cs, &local_err); 6759 if (local_err != NULL) { 6760 error_propagate(errp, local_err); 6761 return; 6762 } 6763 6764 #ifndef CONFIG_USER_ONLY 6765 MachineState *ms = MACHINE(qdev_get_machine()); 6766 qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 6767 6768 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { 6769 x86_cpu_apic_create(cpu, &local_err); 6770 if (local_err != NULL) { 6771 goto out; 6772 } 6773 } 6774 #endif 6775 6776 mce_init(cpu); 6777 6778 #ifndef CONFIG_USER_ONLY 6779 if (tcg_enabled()) { 6780 cpu->cpu_as_mem = g_new(MemoryRegion, 1); 6781 cpu->cpu_as_root = g_new(MemoryRegion, 1); 6782 6783 /* Outer container... */ 6784 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 6785 memory_region_set_enabled(cpu->cpu_as_root, true); 6786 6787 /* ... with two regions inside: normal system memory with low 6788 * priority, and... 6789 */ 6790 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 6791 get_system_memory(), 0, ~0ull); 6792 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 6793 memory_region_set_enabled(cpu->cpu_as_mem, true); 6794 6795 cs->num_ases = 2; 6796 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); 6797 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); 6798 6799 /* ... SMRAM with higher priority, linked from /machine/smram. */ 6800 cpu->machine_done.notify = x86_cpu_machine_done; 6801 qemu_add_machine_init_done_notifier(&cpu->machine_done); 6802 } 6803 #endif 6804 6805 qemu_init_vcpu(cs); 6806 6807 /* 6808 * Most Intel CPUs and certain AMD CPUs support hyperthreading. Even though 6809 * QEMU already adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX to match 6810 * the requested topology (sockets, cores, threads), it is still better to 6811 * give users a warning. 6812 * 6813 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 6814 * cs->nr_threads hasn't been populated yet and the check is incorrect.
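 *
 * For example: '-smp 8,sockets=1,cores=4,threads=2' with an AMD CPU model
 * that lacks topoext trips this warning; adding '+topoext' to -cpu (where
 * the accelerator supports it) avoids it.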
6815 */ 6816 if (IS_AMD_CPU(env) && 6817 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && 6818 cs->nr_threads > 1 && !ht_warned) { 6819 warn_report("This family of AMD CPU doesn't support " 6820 "hyperthreading (%d)", 6821 cs->nr_threads); 6822 error_printf("Please configure -smp options properly" 6823 " or try enabling topoext feature.\n"); 6824 ht_warned = true; 6825 } 6826 6827 x86_cpu_apic_realize(cpu, &local_err); 6828 if (local_err != NULL) { 6829 goto out; 6830 } 6831 cpu_reset(cs); 6832 6833 xcc->parent_realize(dev, &local_err); 6834 6835 out: 6836 if (local_err != NULL) { 6837 error_propagate(errp, local_err); 6838 return; 6839 } 6840 } 6841 6842 static void x86_cpu_unrealizefn(DeviceState *dev) 6843 { 6844 X86CPU *cpu = X86_CPU(dev); 6845 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6846 6847 #ifndef CONFIG_USER_ONLY 6848 cpu_remove_sync(CPU(dev)); 6849 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 6850 #endif 6851 6852 if (cpu->apic_state) { 6853 object_unparent(OBJECT(cpu->apic_state)); 6854 cpu->apic_state = NULL; 6855 } 6856 6857 xcc->parent_unrealize(dev); 6858 } 6859 6860 typedef struct BitProperty { 6861 FeatureWord w; 6862 uint64_t mask; 6863 } BitProperty; 6864 6865 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 6866 void *opaque, Error **errp) 6867 { 6868 X86CPU *cpu = X86_CPU(obj); 6869 BitProperty *fp = opaque; 6870 uint64_t f = cpu->env.features[fp->w]; 6871 bool value = (f & fp->mask) == fp->mask; 6872 visit_type_bool(v, name, &value, errp); 6873 } 6874 6875 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 6876 void *opaque, Error **errp) 6877 { 6878 DeviceState *dev = DEVICE(obj); 6879 X86CPU *cpu = X86_CPU(obj); 6880 BitProperty *fp = opaque; 6881 bool value; 6882 6883 if (dev->realized) { 6884 qdev_prop_set_after_realize(dev, name, errp); 6885 return; 6886 } 6887 6888 if (!visit_type_bool(v, name, &value, errp)) { 6889 return; 6890 } 6891 6892 if (value) { 6893 cpu->env.features[fp->w] |= fp->mask; 6894 } else { 6895 cpu->env.features[fp->w] &= ~fp->mask; 6896 } 6897 cpu->env.user_features[fp->w] |= fp->mask; 6898 } 6899 6900 /* Register a boolean property to get/set a single bit in a uint64_t feature word. 6901 * 6902 * The same property name can be registered multiple times to make it affect 6903 * multiple bits in the same FeatureWord. In that case, the getter will return 6904 * true only if all bits are set. 6905 */ 6906 static void x86_cpu_register_bit_prop(X86CPUClass *xcc, 6907 const char *prop_name, 6908 FeatureWord w, 6909 int bitnr) 6910 { 6911 ObjectClass *oc = OBJECT_CLASS(xcc); 6912 BitProperty *fp; 6913 ObjectProperty *op; 6914 uint64_t mask = (1ULL << bitnr); 6915 6916 op = object_class_property_find(oc, prop_name); 6917 if (op) { 6918 fp = op->opaque; 6919 assert(fp->w == w); 6920 fp->mask |= mask; 6921 } else { 6922 fp = g_new0(BitProperty, 1); 6923 fp->w = w; 6924 fp->mask = mask; 6925 object_class_property_add(oc, prop_name, "bool", 6926 x86_cpu_get_bit_prop, 6927 x86_cpu_set_bit_prop, 6928 NULL, fp); 6929 } 6930 } 6931 6932 static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc, 6933 FeatureWord w, 6934 int bitnr) 6935 { 6936 FeatureWordInfo *fi = &feature_word_info[w]; 6937 const char *name = fi->feat_names[bitnr]; 6938 6939 if (!name) { 6940 return; 6941 } 6942 6943 /* Property names should use "-" instead of "_".
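 * (For example, "fxsr-opt" is the canonical property name, while "fxsr_opt"
 * is only registered as a compatibility alias in x86_cpu_initfn().)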
6944 * Old names containing underscores are registered as aliases 6945 * using object_property_add_alias() 6946 */ 6947 assert(!strchr(name, '_')); 6948 /* aliases don't use "|" delimiters anymore; they are registered 6949 * manually using object_property_add_alias() */ 6950 assert(!strchr(name, '|')); 6951 x86_cpu_register_bit_prop(xcc, name, w, bitnr); 6952 } 6953 6954 #if !defined(CONFIG_USER_ONLY) 6955 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 6956 { 6957 X86CPU *cpu = X86_CPU(cs); 6958 CPUX86State *env = &cpu->env; 6959 GuestPanicInformation *panic_info = NULL; 6960 6961 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 6962 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 6963 6964 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 6965 6966 assert(HV_CRASH_PARAMS >= 5); 6967 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 6968 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 6969 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 6970 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 6971 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 6972 } 6973 6974 return panic_info; 6975 } 6976 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 6977 const char *name, void *opaque, 6978 Error **errp) 6979 { 6980 CPUState *cs = CPU(obj); 6981 GuestPanicInformation *panic_info; 6982 6983 if (!cs->crash_occurred) { 6984 error_setg(errp, "No crash occurred"); 6985 return; 6986 } 6987 6988 panic_info = x86_cpu_get_crash_info(cs); 6989 if (panic_info == NULL) { 6990 error_setg(errp, "No crash information"); 6991 return; 6992 } 6993 6994 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 6995 errp); 6996 qapi_free_GuestPanicInformation(panic_info); 6997 } 6998 #endif /* !CONFIG_USER_ONLY */ 6999 7000 static void x86_cpu_initfn(Object *obj) 7001 { 7002 X86CPU *cpu = X86_CPU(obj); 7003 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 7004 CPUX86State *env = &cpu->env; 7005 7006 env->nr_dies = 1; 7007 cpu_set_cpustate_pointers(cpu); 7008 7009 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 7010 x86_cpu_get_feature_words, 7011 NULL, NULL, (void *)env->features); 7012 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 7013 x86_cpu_get_feature_words, 7014 NULL, NULL, (void *)cpu->filtered_features); 7015 7016 object_property_add_alias(obj, "sse3", obj, "pni"); 7017 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq"); 7018 object_property_add_alias(obj, "sse4-1", obj, "sse4.1"); 7019 object_property_add_alias(obj, "sse4-2", obj, "sse4.2"); 7020 object_property_add_alias(obj, "xd", obj, "nx"); 7021 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt"); 7022 object_property_add_alias(obj, "i64", obj, "lm"); 7023 7024 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl"); 7025 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust"); 7026 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt"); 7027 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm"); 7028 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy"); 7029 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr"); 7030 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core"); 7031 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb"); 7032 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay"); 7033 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu"); 7034 object_property_add_alias(obj, "kvm_asyncpf",
obj, "kvm-asyncpf"); 7035 object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int"); 7036 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 7037 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 7038 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 7039 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 7040 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 7041 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 7042 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 7043 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 7044 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 7045 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 7046 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 7047 7048 if (xcc->model) { 7049 x86_cpu_load_model(cpu, xcc->model); 7050 } 7051 } 7052 7053 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7054 { 7055 X86CPU *cpu = X86_CPU(cs); 7056 7057 return cpu->apic_id; 7058 } 7059 7060 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 7061 { 7062 X86CPU *cpu = X86_CPU(cs); 7063 7064 return cpu->env.cr[0] & CR0_PG_MASK; 7065 } 7066 7067 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 7068 { 7069 X86CPU *cpu = X86_CPU(cs); 7070 7071 cpu->env.eip = value; 7072 } 7073 7074 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 7075 { 7076 X86CPU *cpu = X86_CPU(cs); 7077 CPUX86State *env = &cpu->env; 7078 7079 #if !defined(CONFIG_USER_ONLY) 7080 if (interrupt_request & CPU_INTERRUPT_POLL) { 7081 return CPU_INTERRUPT_POLL; 7082 } 7083 #endif 7084 if (interrupt_request & CPU_INTERRUPT_SIPI) { 7085 return CPU_INTERRUPT_SIPI; 7086 } 7087 7088 if (env->hflags2 & HF2_GIF_MASK) { 7089 if ((interrupt_request & CPU_INTERRUPT_SMI) && 7090 !(env->hflags & HF_SMM_MASK)) { 7091 return CPU_INTERRUPT_SMI; 7092 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 7093 !(env->hflags2 & HF2_NMI_MASK)) { 7094 return CPU_INTERRUPT_NMI; 7095 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 7096 return CPU_INTERRUPT_MCE; 7097 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 7098 (((env->hflags2 & HF2_VINTR_MASK) && 7099 (env->hflags2 & HF2_HIF_MASK)) || 7100 (!(env->hflags2 & HF2_VINTR_MASK) && 7101 (env->eflags & IF_MASK && 7102 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 7103 return CPU_INTERRUPT_HARD; 7104 #if !defined(CONFIG_USER_ONLY) 7105 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 7106 (env->eflags & IF_MASK) && 7107 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 7108 return CPU_INTERRUPT_VIRQ; 7109 #endif 7110 } 7111 } 7112 7113 return 0; 7114 } 7115 7116 static bool x86_cpu_has_work(CPUState *cs) 7117 { 7118 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 7119 } 7120 7121 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 7122 { 7123 X86CPU *cpu = X86_CPU(cs); 7124 CPUX86State *env = &cpu->env; 7125 7126 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 7127 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 7128 : bfd_mach_i386_i8086); 7129 info->print_insn = print_insn_i386; 7130 7131 info->cap_arch = CS_ARCH_X86; 7132 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 7133 : env->hflags & HF_CS32_MASK ? 
CS_MODE_32 7134 : CS_MODE_16); 7135 info->cap_insn_unit = 1; 7136 info->cap_insn_split = 8; 7137 } 7138 7139 void x86_update_hflags(CPUX86State *env) 7140 { 7141 uint32_t hflags; 7142 #define HFLAG_COPY_MASK \ 7143 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 7144 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 7145 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 7146 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 7147 7148 hflags = env->hflags & HFLAG_COPY_MASK; 7149 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 7150 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 7151 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 7152 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 7153 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 7154 7155 if (env->cr[4] & CR4_OSFXSR_MASK) { 7156 hflags |= HF_OSFXSR_MASK; 7157 } 7158 7159 if (env->efer & MSR_EFER_LMA) { 7160 hflags |= HF_LMA_MASK; 7161 } 7162 7163 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 7164 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 7165 } else { 7166 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 7167 (DESC_B_SHIFT - HF_CS32_SHIFT); 7168 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 7169 (DESC_B_SHIFT - HF_SS32_SHIFT); 7170 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 7171 !(hflags & HF_CS32_MASK)) { 7172 hflags |= HF_ADDSEG_MASK; 7173 } else { 7174 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 7175 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 7176 } 7177 } 7178 env->hflags = hflags; 7179 } 7180 7181 static Property x86_cpu_properties[] = { 7182 #ifdef CONFIG_USER_ONLY 7183 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 7184 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 7185 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 7186 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 7187 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0), 7188 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 7189 #else 7190 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 7191 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 7192 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 7193 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1), 7194 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 7195 #endif 7196 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 7197 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 7198 7199 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, 7200 HYPERV_SPINLOCK_NEVER_NOTIFY), 7201 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features, 7202 HYPERV_FEAT_RELAXED, 0), 7203 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features, 7204 HYPERV_FEAT_VAPIC, 0), 7205 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features, 7206 HYPERV_FEAT_TIME, 0), 7207 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features, 7208 HYPERV_FEAT_CRASH, 0), 7209 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features, 7210 HYPERV_FEAT_RESET, 0), 7211 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features, 7212 HYPERV_FEAT_VPINDEX, 0), 7213 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features, 7214 HYPERV_FEAT_RUNTIME, 0), 7215 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features, 7216 HYPERV_FEAT_SYNIC, 0), 7217 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features, 7218 HYPERV_FEAT_STIMER, 0), 7219 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features, 7220 HYPERV_FEAT_FREQUENCIES, 0), 7221 
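    /*
     * Each hv-* flag in this list maps to one bit of the hyperv_features
     * word via DEFINE_PROP_BIT64, so individual enlightenments can be
     * enabled per vCPU from the command line. Illustrative invocation (the
     * CPU model name is only an example; availability depends on the build
     * and accelerator):
     *
     *   qemu-system-x86_64 -accel kvm \
     *       -cpu Skylake-Client,hv-relaxed=on,hv-vapic=on,hv-time=on
     */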
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7222 HYPERV_FEAT_REENLIGHTENMENT, 0), 7223 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7224 HYPERV_FEAT_TLBFLUSH, 0), 7225 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7226 HYPERV_FEAT_EVMCS, 0), 7227 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7228 HYPERV_FEAT_IPI, 0), 7229 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7230 HYPERV_FEAT_STIMER_DIRECT, 0), 7231 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7232 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7233 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7234 7235 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7236 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7237 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7238 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7239 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7240 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7241 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7242 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7243 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7244 UINT32_MAX), 7245 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7246 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7247 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7248 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7249 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7250 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7251 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7252 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7253 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor), 7254 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7255 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7256 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7257 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7258 false), 7259 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7260 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7261 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7262 true), 7263 /* 7264 * lecacy_cache defaults to true unless the CPU model provides its 7265 * own cache information (see x86_cpu_load_def()). 7266 */ 7267 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7268 7269 /* 7270 * From "Requirements for Implementing the Microsoft 7271 * Hypervisor Interface": 7272 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7273 * 7274 * "Starting with Windows Server 2012 and Windows 8, if 7275 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7276 * the hypervisor imposes no specific limit to the number of VPs. 7277 * In this case, Windows Server 2012 guest VMs may use more than 7278 * 64 VPs, up to the maximum supported number of processors applicable 7279 * to the specific Windows version being used." 
7280 */ 7281 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 7282 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 7283 false), 7284 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 7285 true), 7286 DEFINE_PROP_END_OF_LIST() 7287 }; 7288 7289 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 7290 { 7291 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7292 CPUClass *cc = CPU_CLASS(oc); 7293 DeviceClass *dc = DEVICE_CLASS(oc); 7294 FeatureWord w; 7295 7296 device_class_set_parent_realize(dc, x86_cpu_realizefn, 7297 &xcc->parent_realize); 7298 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 7299 &xcc->parent_unrealize); 7300 device_class_set_props(dc, x86_cpu_properties); 7301 7302 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset); 7303 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 7304 7305 cc->class_by_name = x86_cpu_class_by_name; 7306 cc->parse_features = x86_cpu_parse_featurestr; 7307 cc->has_work = x86_cpu_has_work; 7308 7309 #ifdef CONFIG_TCG 7310 tcg_cpu_common_class_init(cc); 7311 #endif /* CONFIG_TCG */ 7312 7313 cc->dump_state = x86_cpu_dump_state; 7314 cc->set_pc = x86_cpu_set_pc; 7315 cc->gdb_read_register = x86_cpu_gdb_read_register; 7316 cc->gdb_write_register = x86_cpu_gdb_write_register; 7317 cc->get_arch_id = x86_cpu_get_arch_id; 7318 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 7319 7320 #ifndef CONFIG_USER_ONLY 7321 cc->asidx_from_attrs = x86_asidx_from_attrs; 7322 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 7323 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; 7324 cc->get_crash_info = x86_cpu_get_crash_info; 7325 cc->write_elf64_note = x86_cpu_write_elf64_note; 7326 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 7327 cc->write_elf32_note = x86_cpu_write_elf32_note; 7328 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 7329 cc->vmsd = &vmstate_x86_cpu; 7330 #endif /* !CONFIG_USER_ONLY */ 7331 7332 cc->gdb_arch_name = x86_gdb_arch_name; 7333 #ifdef TARGET_X86_64 7334 cc->gdb_core_xml_file = "i386-64bit.xml"; 7335 cc->gdb_num_core_regs = 66; 7336 #else 7337 cc->gdb_core_xml_file = "i386-32bit.xml"; 7338 cc->gdb_num_core_regs = 50; 7339 #endif 7340 cc->disas_set_info = x86_disas_set_info; 7341 7342 dc->user_creatable = true; 7343 7344 object_class_property_add(oc, "family", "int", 7345 x86_cpuid_version_get_family, 7346 x86_cpuid_version_set_family, NULL, NULL); 7347 object_class_property_add(oc, "model", "int", 7348 x86_cpuid_version_get_model, 7349 x86_cpuid_version_set_model, NULL, NULL); 7350 object_class_property_add(oc, "stepping", "int", 7351 x86_cpuid_version_get_stepping, 7352 x86_cpuid_version_set_stepping, NULL, NULL); 7353 object_class_property_add_str(oc, "vendor", 7354 x86_cpuid_get_vendor, 7355 x86_cpuid_set_vendor); 7356 object_class_property_add_str(oc, "model-id", 7357 x86_cpuid_get_model_id, 7358 x86_cpuid_set_model_id); 7359 object_class_property_add(oc, "tsc-frequency", "int", 7360 x86_cpuid_get_tsc_freq, 7361 x86_cpuid_set_tsc_freq, NULL, NULL); 7362 /* 7363 * The "unavailable-features" property has the same semantics as 7364 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 7365 * QMP command: they list the features that would have prevented the 7366 * CPU from running if the "enforce" flag was set. 
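 *
 * Illustrative use (the QOM path is only an example):
 *   (HMP) qom-get /machine/unattached/device[0] unavailable-features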
7367 */ 7368 object_class_property_add(oc, "unavailable-features", "strList", 7369 x86_cpu_get_unavailable_features, 7370 NULL, NULL, NULL); 7371 7372 #if !defined(CONFIG_USER_ONLY) 7373 object_class_property_add(oc, "crash-information", "GuestPanicInformation", 7374 x86_cpu_get_crash_info_qom, NULL, NULL, NULL); 7375 #endif 7376 7377 for (w = 0; w < FEATURE_WORDS; w++) { 7378 int bitnr; 7379 for (bitnr = 0; bitnr < 64; bitnr++) { 7380 x86_cpu_register_feature_bit_props(xcc, w, bitnr); 7381 } 7382 } 7383 } 7384 7385 static const TypeInfo x86_cpu_type_info = { 7386 .name = TYPE_X86_CPU, 7387 .parent = TYPE_CPU, 7388 .instance_size = sizeof(X86CPU), 7389 .instance_init = x86_cpu_initfn, 7390 .abstract = true, 7391 .class_size = sizeof(X86CPUClass), 7392 .class_init = x86_cpu_common_class_init, 7393 }; 7394 7395 7396 /* "base" CPU model, used by query-cpu-model-expansion */ 7397 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 7398 { 7399 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7400 7401 xcc->static_model = true; 7402 xcc->migration_safe = true; 7403 xcc->model_description = "base CPU model type with no features enabled"; 7404 xcc->ordering = 8; 7405 } 7406 7407 static const TypeInfo x86_base_cpu_type_info = { 7408 .name = X86_CPU_TYPE_NAME("base"), 7409 .parent = TYPE_X86_CPU, 7410 .class_init = x86_cpu_base_class_init, 7411 }; 7412 7413 static void x86_cpu_register_types(void) 7414 { 7415 int i; 7416 7417 type_register_static(&x86_cpu_type_info); 7418 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 7419 x86_register_cpudef_types(&builtin_x86_defs[i]); 7420 } 7421 type_register_static(&max_x86_cpu_type_info); 7422 type_register_static(&x86_base_cpu_type_info); 7423 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 7424 type_register_static(&host_x86_cpu_type_info); 7425 #endif 7426 } 7427 7428 type_init(x86_cpu_register_types) 7429
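/*
 * Illustrative ways to exercise the types registered above ("host" is only
 * available when built with a hardware accelerator, and model availability
 * depends on the build):
 *
 *   qemu-system-x86_64 -cpu help
 *   qemu-system-x86_64 -accel kvm -cpu host
 *   qemu-system-x86_64 -cpu base,+sse4.2,+popcnt
 *
 * The "base" model registered by x86_cpu_base_class_init() starts with no
 * features enabled, so features must be turned on explicitly, as in the
 * last example.
 */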